epylog/0000755000175000017500000000000012527660516011357 5ustar tiagotiagoepylog/install-sh0000755000175000017500000001273612527655413013374 0ustar tiagotiago#!/bin/sh # # install - install a program, script, or datafile # This comes from X11R5 (mit/util/scripts/install.sh). # # Copyright 1991 by the Massachusetts Institute of Technology # # Permission to use, copy, modify, distribute, and sell this software and its # documentation for any purpose is hereby granted without fee, provided that # the above copyright notice appear in all copies and that both that # copyright notice and this permission notice appear in supporting # documentation, and that the name of M.I.T. not be used in advertising or # publicity pertaining to distribution of the software without specific, # written prior permission. M.I.T. makes no representations about the # suitability of this software for any purpose. It is provided "as is" # without express or implied warranty. # # Calling this script install-sh is preferred over install.sh, to prevent # `make' implicit rules from creating a file called install from it # when there is no Makefile. # # This script is compatible with the BSD install script, but was written # from scratch. It can only install one file at a time, a restriction # shared with many OS's install programs. # set DOITPROG to echo to test this script # Don't use :- since 4.3BSD and earlier shells don't like it. doit="${DOITPROG-}" # put in absolute paths if you don't have them in your path; or use env. vars. mvprog="${MVPROG-mv}" cpprog="${CPPROG-cp}" chmodprog="${CHMODPROG-chmod}" chownprog="${CHOWNPROG-chown}" chgrpprog="${CHGRPPROG-chgrp}" stripprog="${STRIPPROG-strip}" rmprog="${RMPROG-rm}" mkdirprog="${MKDIRPROG-mkdir}" transformbasename="" transform_arg="" instcmd="$mvprog" chmodcmd="$chmodprog 0755" chowncmd="" chgrpcmd="" stripcmd="" rmcmd="$rmprog -f" mvcmd="$mvprog" src="" dst="" dir_arg="" while [ x"$1" != x ]; do case $1 in -c) instcmd="$cpprog" shift continue;; -d) dir_arg=true shift continue;; -m) chmodcmd="$chmodprog $2" shift shift continue;; -o) chowncmd="$chownprog $2" shift shift continue;; -g) chgrpcmd="$chgrpprog $2" shift shift continue;; -s) stripcmd="$stripprog" shift continue;; -t=*) transformarg=`echo $1 | sed 's/-t=//'` shift continue;; -b=*) transformbasename=`echo $1 | sed 's/-b=//'` shift continue;; *) if [ x"$src" = x ] then src=$1 else # this colon is to work around a 386BSD /bin/sh bug : dst=$1 fi shift continue;; esac done if [ x"$src" = x ] then echo "install: no input file specified" exit 1 else true fi if [ x"$dir_arg" != x ]; then dst=$src src="" if [ -d $dst ]; then instcmd=: chmodcmd="" else instcmd=mkdir fi else # Waiting for this to be detected by the "$instcmd $src $dsttmp" command # might cause directories to be created, which would be especially bad # if $src (and thus $dsttmp) contains '*'. if [ -f $src -o -d $src ] then true else echo "install: $src does not exist" exit 1 fi if [ x"$dst" = x ] then echo "install: no destination specified" exit 1 else true fi # If destination is a directory, append the input filename; if your system # does not like double slashes in filenames, you may need to add some logic if [ -d $dst ] then dst="$dst"/`basename $src` else true fi fi ## this sed command emulates the dirname command dstdir=`echo $dst | sed -e 's,[^/]*$,,;s,/$,,;s,^$,.,'` # Make sure that the destination directory exists. # this part is taken from Noah Friedman's mkinstalldirs script # Skip lots of stat calls in the usual case. if [ ! 
-d "$dstdir" ]; then defaultIFS=' ' IFS="${IFS-${defaultIFS}}" oIFS="${IFS}" # Some sh's can't handle IFS=/ for some reason. IFS='%' set - `echo ${dstdir} | sed -e 's@/@%@g' -e 's@^%@/@'` IFS="${oIFS}" pathcomp='' while [ $# -ne 0 ] ; do pathcomp="${pathcomp}${1}" shift if [ ! -d "${pathcomp}" ] ; then $mkdirprog "${pathcomp}" else true fi pathcomp="${pathcomp}/" done fi if [ x"$dir_arg" != x ] then $doit $instcmd $dst && if [ x"$chowncmd" != x ]; then $doit $chowncmd $dst; else true ; fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dst; else true ; fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dst; else true ; fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dst; else true ; fi else # If we're going to rename the final executable, determine the name now. if [ x"$transformarg" = x ] then dstfile=`basename $dst` else dstfile=`basename $dst $transformbasename | sed $transformarg`$transformbasename fi # don't allow the sed command to completely eliminate the filename if [ x"$dstfile" = x ] then dstfile=`basename $dst` else true fi # Make a temp file name in the proper directory. dsttmp=$dstdir/#inst.$$# # Move or copy the file name to the temp name $doit $instcmd $src $dsttmp && trap "rm -f ${dsttmp}" 0 && # and set any options; do chmod last to preserve setuid bits # If any of these fail, we abort the whole thing. If we want to # ignore errors from any of these, just make sure not to ignore # errors from the above "$doit $instcmd $src $dsttmp" command. if [ x"$chowncmd" != x ]; then $doit $chowncmd $dsttmp; else true;fi && if [ x"$chgrpcmd" != x ]; then $doit $chgrpcmd $dsttmp; else true;fi && if [ x"$stripcmd" != x ]; then $doit $stripcmd $dsttmp; else true;fi && if [ x"$chmodcmd" != x ]; then $doit $chmodcmd $dsttmp; else true;fi && # Now rename the file to the real destination. $doit $rmcmd -f $dstdir/$dstfile && $doit $mvcmd $dsttmp $dstdir/$dstfile fi && exit 0 epylog/doc/0000755000175000017500000000000012527655413012124 5ustar tiagotiagoepylog/doc/modules.txt0000644000175000017500000002413012527655413014335 0ustar tiagotiagoModules -------- Modules are the essential part of epylog -- the one that actually does string parsing and report generation. This document helps describe how modules operate. Internal vs. External ---------------------- There are generally two types of modules -- internal and external. External modules are more or less a legacy device left over since the days of DULog and they use the same API as in DULog days. All internal modules must be written in Python and adhere to a very strict API described further down in the document. External modules can be written in any language and intercommunicate with Epylog using a system of environment variables and temporary files. External modules exist only as a convenience feature -- addition of any external modules will make the processing generally less efficient. Internal module API -------------------- Here is how things go when an internal module is invoked: 1. Epylog initializes the logfiles and sets the offsets based either on timestamps, or on hard offsets from offsets.xml. Rotated logfiles are initialized and used as necessary. 2. Epylog starts going through each log line-by-line, unwrapping "Last message repeated" lines as necessary. 3. As each line is received, Epylog consults which modules requested the logfile being processed. Only modules requesting that logfile are invoked. 4. For matching, Epylog checks the regex_map dictionary provided by each module. 5. 
If there is a match, the handler method for the matching module and the matching line are placed in the processing queue. 6. One of the processing threads picks up the handler and the line and executes the handler. 7. The result returned by the handler is placed back into the queue, where it is added to the result set. 8. Once there is a match, Epylog does not process other handlers and goes on to the next line. This happens unless multimatch is set in epylog.conf. If that option is set, Epylog will try all regexes whether or not one of them matched already. This slows things down significantly. 9. Once all lines have been processed, Epylog notifies all of the threads that they can quit now. 10. Once all threads have exited, the finalize method of each module is called with the resultset passed to it. The "finalize" method is supposed to return the module report to be added to the final report. Keeping this procedure in mind, it is important to remember the following things when writing an internal module: 1. It must be written in Python. 2. It will be invoked with -tt, meaning that you need to make sure that either all your tabs are tabs, or they are spaces. No mixing! 3. __init__ of each module is invoked during Epylog initialization. Do all your regex compiles at that time. Do not do any regex compiles in the handlers -- that is most inefficient. 4. Handler methods will be invoked by processing threads, meaning that they MUST be thread-safe. The purpose of handler methods is to parse the line, do any and all hostname lookups and such, and return a result that can be easily processed in the "finalize" stage. Do NOT access any external module methods for writing -- there is a very good chance that it will cause a hemorrhage when several threads modify an object at the same time. Accessing external objects for read-only purposes is OK -- e.g. the regexes you compiled earlier during the __init__ stage. 5. Keep results consistent -- see Results and Resultsets for more info. 6. A resultset is a dictionary, so you cannot rely on the order in which things appeared in the logs. This is not reliable in any case -- with threaded processing, some results can arrive in any order if the processing, such as a hostname lookup, took a long time. 7. The finalize step is not threaded, so feel free to go crazy with the results. 8. Return a report that looks consistent with the rest of the message. Do not go nuts with colors, though -- only highlight the most important information. You will get used to excessive highlighting very quickly and it will lose any meaning. Do not overdo gray/white alternating rows in your report -- they are only useful when there are more than two columns in the row. Results and Resultsets ---------------------- Epylog uses a resultset to keep track of repeating messages. This helps save on memory and simplifies the processing in the finalize stage for most modules. Your handler method should return a dictionary looking like this: {key: int} The key can be any hashable value you've obtained from processing the line given to you. The int is the "multiplier" by which you indicate how many times this event occurred. Most commonly you will just pass through the "multiplier" field passed to the handler function, but depending on the data in the line itself, you might need to change the value. E.g.
consider the following entries:

Apr 10 10:01:20 cartman kernel: 5 underpant gnomes spotted
Apr 10 10:01:21 cartman last message repeated 15 times

The "message" field of the linemap passed to you will be identical, since epylog will unwrap the "last message repeated" line. However, the "multiplier" field will be "1" in the first case, and "15" in the second case. The result you will return for the first line will be something like:

{('cartman', 'underpant gnome'): 5}

but for the second line you will need to make sure you do 5*15 for the multiplier value, so your result will look like so:

{('cartman', 'underpant gnome'): 75}

When Epylog receives these results, it will automatically do the math, so the resultset will only contain one mention of 'underpant gnome', at least as related to hostname 'cartman':

{('cartman', 'underpant gnome'): 80}

It is therefore useful to key the result by a tuple of values. The epylog.Result class is built around that, which helps during the finalize stage. E.g. to process the resultset from the above two lines, the snippet of code would be:

report = ''
for hostname in resultset.get_distinct(()):
    submap = resultset.get_submap((hostname,))
    while 1:
        try:
            key, mult = submap.popitem()
        except KeyError:
            break
        message = key[0]
        report += '%s: %s(%d)' % (hostname, message, mult)
return report

This will produce the following report:

cartman: underpant gnome(80)

The Result class provides several convenience methods, such as get_distinct, get_submap, and get_top; however, be aware that they should not be used if you have thousands of entries in the resultset, as they are not very efficient. They are only useful if you go directly from a resultset to a report, without any additional processing. If you have (or anticipate having) thousands of entries, it is easier to iterate through them one-by-one in order to present the final report. A resultset is, after all, a dictionary, so if you do not want to use any methods from the Result class, you may always just treat the data passed to finalize as a common dict. If your handler method returns {} as a result, the line will be considered processed, but nothing will be added to the resultset (useful when you want to just ignore a line, though weeder_mod.py will do this better). If your method returns None, it is assumed that you could not parse the line, and the line will not be considered matched. Nothing will be added to the resultset, and the matching will continue. This is useful if you could not parse the line for some reason. Just return None and let the line be added to the unparsed strings. See the code for more info --------------------------- See existing modules for more information, and consult doc/templates/template_mod.py for more details on actual code writing. See also InternalModule, Result, and other classes in the __init__ module of epylog. External module API -------------------- You are discouraged from using the external module API, but you might find it useful if you prefer to use something like Perl for parsing. All communication between the core of Epylog and external modules is done via environment variables. There are several variables you should pay attention to:

LOGCAT This variable contains the location of a file. The file in question contains raw log entries that the module needs to analyze.

LOGREPORT This variable also contains the location of a file, but this file most likely doesn't exist yet. After the module completes its run, it needs to put whatever report it generates into that file.
LOGFILTER This variable contains the location of a file as well. All log entries analyzed by the module should go into this file so Epylog can fgrep the results against the original file and have only the unparsed data in the end.

CONFDIR The location of the config directory. If your module uses any config files, they should be placed into that dir. See epylog.conf(5) for more info.

TMPDIR and TMPPREFIX Both these variables are available if you need to create any temporary files, but the use of TMPDIR is STRONGLY discouraged, as is the use of /tmp or other world-writable locations: since Epylog runs as user root, that makes it susceptible to race-condition attacks, leading to root exploits. If you need to create a temporary file, use TMPPREFIX as your base and append data to the end of it, i.e. $TMPPREFIX.my.

QUIET and DEBUG If QUIET exists and is set, then you shouldn't output anything but critical errors during the run. DEBUG, on the other hand, can have any value from 2 to infinity, but probably not more than 5 for all useful cases. The higher the DEBUG level, the more verbose the module's output becomes, although this is up to the module authors. If neither QUIET nor DEBUG is set, then debug level 1 is assumed, at which only useful data is output onto the console.

Perl external modules ---------------------- Modules written in Perl can use the Epylog Perl module. For more info see Epylog(3). Module Configuration --------------------- See epylog-modules(5) for more info on the epylog module config files. epylog/doc/roadmap.txt0000644000175000017500000000016112527655413014306 0ustar tiagotiagoEpylog Roadmap ------------- /-> 1.0 You are here | * Do bugfixes | \-> 1.1 See Linux@DUKE Bugzilla -- $Id$ epylog/doc/templates/0000755000175000017500000000000012527655413014122 5ustar tiagotiagoepylog/doc/templates/template_mod.py0000644000175000017500000000736512527655413017151 0ustar tiagotiago#!/usr/bin/python -tt import sys import re ## # This is for testing purposes, so you can invoke this from the # modules directory. See also the testing notes at the end of the # file. # sys.path.insert(0, '../py/') from epylog import InternalModule class template_mod(InternalModule): ## # opts: is a map with extra options set in # [conf] section of the module config, or on the # command line using -o flag to the module. # logger: A logging object. API: # logger.put(loglvl, 'Message') # Only critical stuff needs to go onto lvl 0. # Common output goes to lvl 1. # Others are debug levels. # def __init__(self, opts, logger): ## # Do a "super-init" so the class we are subclassing gets # instantiated. # InternalModule.__init__(self) self.logger = logger ## # Convenience # rc = re.compile ## # This map specifies the regexes and the handlers for them. # ===> THIS MAP MUST EXIST AND BE NAMED "regex_map" <== # The format is as follows: #self.regex_map = { # rc('STRING TO MATCH'): self.handler_func, # rc('ANOTHER STRING'): self.other_handler_func # } # self.regex_map = { } ## # Line-matching routines # def handler_func(self, linemap): ## # linemap is a dictionary with the following members: # line: the original, unadulterated line. # stamp: unix timestamp of the event # system: the reporting hostname # message: The actual message # multiplier: This is how many times this event occurs. # Most often this will be set to 1, but it # can have other values as a result of unwrapping # the "last message repeated" lines by epylog. # ## # See the methods in epylog.InternalModule for insight on # which convenience methods are available to you.
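        ##
        # A minimal illustrative sketch, assuming a hypothetical regex
        # self.gnome_re compiled in __init__ and registered in regex_map,
        # for lines like "5 underpant gnomes spotted" (see the resultset
        # discussion in doc/modules.txt):
        #
        #   mo = self.gnome_re.search(linemap['message'])
        #   if not mo:
        #       # could not parse the line after all -- leave it unmatched
        #       return None
        #   count = int(mo.group(1)) * linemap['multiplier']
        #   # key by a tuple; the int says how many times the event occurred
        #   return {(linemap['system'], 'underpant gnome'): count}
        #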
# ## # WARNING WARNING WARNING # # Please read documents/modules.txt, especially the section about # making your modules THREAD SAFE. You _CANNOT_ access external # objects for writing! This will cause data loss. This step should be # SOLELY for analyzing the log string and doing network lookups. # All processing should be done in finalize stage. # ## # DO SOME STUFF HERE # ## # The result of your computation must be a dictionary with the # following fields: # * A tuple of values used as key # * An integer indicating how many times this occurs. # Usually this will be the "multiplier" carried over, but # somethimes that value will be incremented depending on the # result of your processing. # E.g.: return {('linux0', '2.4.20-9', 'Linux'): 1} # Multiple fields are permitted. # # Returning None will indicate that the line has not been processed # successfully. # return {} def finalize(self, resultset): ## # A resultset is a dictionary of all values returned by your # handler functions -- except they are unique and show how many # times each tuple occurs. # See epylog.Result for some convenience methods to use when # processing and analyzing the results. # return 'REPORT' ## # This is useful when testing your module out. # Invoke without command-line parameters to learn about the proper # invocation. # if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(template_mod, sys.argv) epylog/doc/templates/template.mod.pl0000644000175000017500000000534112527655413017053 0ustar tiagotiago#!/usr/bin/perl -w # template.mod.pl # ---------------- # # Copyright (C) 2001-2002 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # To view the help for this module, run "perldoc template.mod.pl" # To create the text version, run # "perldoc -t template.mod.pl > template.mod.txt" # For help with POD (plain old documentation) see the "perlpod" # man page. POD is really easy, so check it out! # # $Id$ # # @Author Michael D. Stenner # Konstantin Ryabitsev # @version $Date$ # ## # Strict enforces good coding practices by making you observe the # variable scope. # Add other packages, too. # use strict; use epylog; ## # Main module code. # my $du = new epylog(); $du->init('template'); $du->mlog(1, "beginning to read input"); ## # Iterate through all lines in the logs and analyze the ones that # match. # while ($du->islogeof() == 0) { my $line = $du->nextline(); ## # Put line-analyzing code here. # ## # Add any analyzed lines to filter. # $du->pushfilt($line); } $du->mlog(1, "finished reading input"); ## # See if we've done anything useful. # if ($du->filtsize() > 0){ $du->mlog(1, "generating the report"); $du->pushrep($du->mkrephdr('TEMPLATE ANALYSIS')); ## # Put your report-formatting code here. # } ## # Don't forget to call finalize()! 
# $du->mlog(1, "Finalizing"); $du->finalize(); $du->mlog(1, "Exiting..."); __END__ =pod =head1 NAME template.mod.pl - template module for epylog. =head1 DESCRIPTION This module analyzes syslog entries and reports them nicely. Currently supports and entry types. =head1 OPTIONS This module accepts a number of options. All options are read from environment variables. These options can be set in the epylog config file (/etc/epylog/epylog.conf by default). The env variables it uses are: =over 5 =item BOGUS This sets BOGUS, making the module output BOGUS. =back =head1 AUTHOR Joe D. Bloe, =head1 REVISION $Revision$ =head1 SEE ALSO epylog(8), epylog-modules(5), epylog(3) =cut epylog/doc/testcase/0000755000175000017500000000000012527655413013737 5ustar tiagotiagoepylog/doc/testcase/packets.testcase0000644000175000017500000000204712527655413017131 0ustar tiagotiagoApr 14 11:39:12 hagrid kernel: iptables: LDROP IN=eth0 OUT= MAC=00:60:08:31:66:70:00:d0:04:f9:7f:fc:08:00 SRC=192.168.230.50 DST=192.168.182.44 LEN=60 TOS=0x00 PREC=0x00 TTL=51 ID=47014 DF PROTO=TCP SPT=2425 DPT=443 WINDOW=5840 RES=0x00 SYN URGP=0 Apr 14 11:39:15 hagrid kernel: iptables: LDROP IN=eth0 OUT= MAC=00:60:08:31:66:70:00:d0:04:f9:7f:fc:08:00 SRC=192.168.230.50 DST=192.168.182.44 LEN=60 TOS=0x00 PREC=0x00 TTL=51 ID=47015 DF PROTO=TCP SPT=2425 DPT=443 WINDOW=5840 RES=0x00 SYN URGP=0 Apr 13 05:22:23 web kernel: Packet log: input REJECT eth0 PROTO=6 182.168.17.77:2557 192.168.60.210:445 L=48 S=0x00 I=18413 F=0x4000 T=98 SYN (#20) Apr 13 05:22:26 web kernel: Packet log: input REJECT eth0 PROTO=6 192.168.96.77:2557 192.168.60.210:445 L=48 S=0x00 I=18667 F=0x4000 T=98 SYN (#20) Oct 13 00:46:33 hostname ipmon[117]: 00:46:33.116780 elxl1 @0:6 b 192.168.21.10,138 -> 192.168.21.255,138 PR udp len 20 229 IN Apr 18 10:42:15 src@annapurna Packet log: input REJECT eth0 PROTO=17 152.3.182.44:37128 152.3.183.255:111 L=128 S=0x00 I=0 F=0x4000 T=64 (#31) epylog/doc/testcase/notices.testcase0000644000175000017500000000404612527655413017144 0ustar tiagotiagoApr 6 12:00:00 hagrid ypserv[17381]: refused connect from 192.168.182.2:59557 to procedure ypproc_match Apr 6 12:00:00 hagrid sshd[17779]: Did not receive identification string from 192.168.182.3 Apr 6 12:00:00 hagrid kernel: Linux version 2.4.18-27.7.x (bhcompile@stripples.devel.redhat.com) (gcc version 2.96 20000731 (Red Hat Linux 7.3 2.96-112)) #1 Fri Mar 14 05:51:23 EST 2003 Apr 9 15:02:11 grads-14/grads-14 kernel: VFS: busy inodes on changed media. 
Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed Apr 15 23:26:41 grads-29/grads-29 kernel: attempt to access beyond end of device Apr 15 23:26:41 grads-29/grads-29 kernel: 02:00: rw=0, want=10, limit=4 Apr 15 23:26:41 grads-29/grads-29 kernel: Directory sread (sector 0x13) failed epylog/doc/testcase/logins.testcase0000644000175000017500000000575312527655413017001 0ustar tiagotiagoApr 6 12:00:00 hagrid sshd[30260]: Accepted rhosts-rsa for jacd from 192.168.1.3 port 32901 ruser jacd Apr 6 12:00:00 hagrid sshd[28091]: Accepted publickey for ivon from 192.168.65.208 port 36269 ssh2 Apr 6 12:00:00 hagrid sshd[316]: Accepted password for shwetcat from 192.168.25.84 port 47030 Apr 6 12:00:00 hagrid sshd[7136]: Failed password for boyz from 192.168.104.191 port 565 Apr 6 12:00:00 hagrid sshd[31221]: Failed none for illegal user Bogus from 192.168.183.188 port 32982 ssh2 Apr 6 12:00:00 hagrid imapd[16091]: Authenticated user=jdcecile host=login2.dept.school.edu [192.168.182.75] Apr 6 12:00:00 hagrid ipop3d[5613]: Login user=shka host=momentum.dept.school.edu [192.168.169.5] nmsgs=144/144 Apr 6 12:00:00 hagrid ipop3d[804]: Login failed user=elulo auth=elulo host=res-192-168-222-235.dorm.school.edu [192.168.222.235] Apr 6 12:00:00 hagrid IMP[15435]: Login 192.168.182.140 to mail.dept.school.edu:993 as kino Apr 6 12:00:00 hagrid IMP[15398]: FAILED 192.168.182.140 to mail.school.edu:993 as kino Apr 6 12:00:00 hagrid HORDE[27127]: [imp] Login success for hal [192.168.133.234] to {mail.dept.school.edu:993} [on line 64 of "/usr/share/horde/imp/redirect.php"] Apr 6 12:00:00 hagrid HORDE[27126]: [imp] FAILED LOGIN 192.168.133.234 to mail.dept.school.edu:993[imap/ssl/novalidate-cert] as hal [on line 270 of "/usr/share/horde/imp/lib/IMP.php"] Apr 10 12:59:39 login1/login1 time(pam_unix)[13050]: session opened for user jove by (uid=0) Apr 10 12:59:39 login1/login1 gdm(pam_unix)[13050]: session opened for user jove by (uid=0) Apr 10 13:18:17 hagrid/hagrid xscreensaver(pam_unix)[1903]: authentication failure; logname= uid=1567 euid=1567 tty=:0.0 ruser= rhost= user=ivon Apr 10 15:07:18 public0/public0 gdm(pam_unix)[1466]: authentication failure; logname= uid=0 euid=0 tty=:0 ruser= rhost= Apr 11 
17:11:21 mail/mail imapd[23246]: Login user=matvey host=tonatiu.dept.school.edu [192.168.183.16] Apr 16 00:45:26 src@monserv sshd(pam_unix)[13904]: authentication failure; logname= uid=0 euid=0 tty=NODEVssh ruser= rhost=fleur.adsl.school.edu user=root Apr 16 00:45:29 src@monserv sshd[13904]: Failed password for root from 192.168.65.208 port 43410 ssh2 Apr 16 00:45:31 src@monserv sshd[13904]: Accepted password for root from 192.168.65.208 port 43410 ssh2 Apr 16 00:45:31 src@monserv sshd(pam_unix)[13904]: session opened for user root by (uid=0) Sep 22 21:52:44 jabber perl: pam_krb5: authentication fails for `mess' Sep 22 21:54:43 jabber perl: pam_krb5: authentication succeeds for `messicar' Sep 25 13:29:59 acpub/acpub gdm[1227]: pam_krb5afs: authentication succeeds for `dof2' Sep 25 23:10:27 src@annapurna imap-login: Aborted login [66.57.254.90] Sep 25 23:10:08 src@annapurna imap-login: Login: icon [66.57.254.90] Sep 25 23:31:21 norbert pop3d-ssl: LOGIN, user=testman, ip=[::ffff:127.0.0.1] Sep 25 23:27:50 norbert pop3d: LOGIN FAILED, ip=[::ffff:127.0.0.1] Jan 23 10:55:03 i5 systemd-logind[1225]: New user mricon logged in. Jan 23 10:55:03 i5 systemd-logind[1225]: New session 2 of user mricon. epylog/doc/testcase/spamd.testcase0000644000175000017500000000122012527655413016573 0ustar tiagotiagoApr 17 16:38:02 mail/mail spamd[25639]: clean message (-1.3/5.0) for cmyk:1337 in 0.0 seconds, 2074 bytes. Apr 17 16:38:02 mail/mail spamd[25636]: clean message (-1.3/5.0) for ivon:1567 in 0.0 seconds, 2075 bytes. Apr 17 16:38:02 mail/mail spamd[25633]: clean message (-1.3/5.0) for srvidal:875 in 0.0 seconds, 2078 bytes. Apr 17 16:38:53 mail/mail spamd[25653]: clean message (0.9/5.0) for sanders:1533 in 0.0 seconds, 3219 bytes. Apr 17 16:39:18 mail/mail spamd[25666]: clean message (0.2/5.0) for mpror:851 in 0.0 seconds, 2923 bytes. Apr 17 16:38:53 mail/mail spamd[25653]: identified spam (10.2/5.0) for sanders:1533 in 0.0 seconds, 3219 bytes. epylog/doc/testcase/weeder.testcase0000644000175000017500000000056612527655413016756 0ustar tiagotiagoApr 15 23:34:15 mail/mail postfix/smtp[18846]: warning: numeric domain name in resource data of MX record for freemail.globalsite.com.br: 192.168.176.25 Apr 15 23:34:47 mail/mail postfix/smtp[18850]: warning: no MX host for coolmusicforyou.net has a valid A record Apr 15 23:34:47 mail/mail postfix/smtp[18853]: warning: no MX host for karlmarxclub.com has a valid A record epylog/doc/testcase/mail.testcase0000644000175000017500000000075012527655413016420 0ustar tiagotiagoApr 6 04:03:12 mail/mail postfix/smtpd[31840]: 2170BA77E0: client=web.dept.school.edu[192.168.182.45] Apr 6 04:03:12 mail/mail postfix/cleanup[31852]: 2170BA77E0: message-id=<20030406080310.1AA0539B1@web.dept.school.edu> Apr 6 04:03:12 mail/mail postfix/nqmgr[1372]: 2170BA77E0: from=, size=4311, nrcpt=1 (queue active) Apr 6 04:03:12 mail/mail postfix/local[31883]: 2170BA77E0: to=, relay=local, delay=0, status=sent ("|/usr/bin/procmail") epylog/ChangeLog0000644000175000017500000000573512527655413013143 0ustar tiagotiagoEpylog-1.0.7 * Re-apply a fix to not return an error with a 0-length log file. * Remove unmaintained spec file. Epylog-1.0.6 * Back out the unfinished work to support timestamped rotation (will be implemented fully in 1.1). Fixes the cron-run problem in 1.0.5. Epylog-1.0.5 * Add systemd-logind support * Add support for GPG encrypting and signing email reports. 
Epylog-1.0.4 * Be more lenient about syslog format (FC7 changes) (ticket #4) * Add a "save_rawlogs" option to file publisher, and don't save them by default, since that's a tad too paranoid (via Seth Vidal) (ticket #2) Epylog-1.0.3 * Preparing for the submission to Fedora Extras * Ignore "su" performed by crond to cut down on noise * Add a few extra ignore rules for newer OSes * Require python-abi instead of python. NOTE: This will not work on systems with python-2.2 (el3), so change the specfile to Require /usr/bin/python%(%{__python} -c....) or just use my packages. Epylog-1.0.2 * Small modification to collapse login reports * Fix monthmap on python-2.4 Epylog-1.0.1 * Tiny fixes with Cyrus handling routines in logins, which made them not work. * Automatically detect python version during .spec building. * Cleaned up trojans.list so it's less ugly Epylog-1.0 * Out with 1.0 already! * Handle ::ffff: fake ipv6 addresses for hostname resolution * Do not depend on elinks in RPM. * Default setting is to send html-only (so we don't depend on lynx) * Packets module can now sort by port, system, and source. Epylog-0.9.7 * Accepted Makefile patches from Will Newton * Accepted patches for missing logs from Will Newton (#135) * Fixes for bugs/RFEs: #136, #146, #148, #210, and others. * Riabitsev->Ryabitsev. Sigh. Epylog-0.9.6 * Small bugfix in repeated line lookup routines * Switched xml handling code from pyxml to libxml2 * Added pam_krb5 handling for logins module * Added dovecot imap handling * Added courier imap handling * Only load our mytempfile if mkdtemp not available (python < 2.3) Epylog-0.9.5 * Fix for quasi-bug #6 (docstrings added) * Fix for bug #57 * Fix for bug #53 * Cron mode of operation added -- checks for a lockfile (bug #79) Epylog-0.9.4 * Fix for bug #38 (incorrect offsets were causing backtrace) * Normalized logger calls (bug #9) * Enhancements to mail and packets modules Epylog-0.9.3 * Autoconf now used to configure and build epylog. * Mail module now supports qmail. * Additions to weed_dist rules * Perl module removed into a separate RPM package. Epylog-0.9.2 * Notices module reworked to support custom notifications. * Weeder module now supports 'ALL' for enable * Some changes to epylog core to return matched regex as part of linemap. Epylog-0.9.1 * Bugfixes for errors/warnings found by pychecker. * Added doc/INSTALL for those installing not from RPM. Epylog-0.9.0 * Killing old DULog-related changelog settings * Support for internal modules * Threading support * Rewrite of all modules in python * File Publisher support epylog/etc/0000755000175000017500000000000012527655413012132 5ustar tiagotiagoepylog/etc/report_template.html0000644000175000017500000000113112527655413016222 0ustar tiagotiago @@TITLE@@

@@HOSTNAME@@

First event: @@STARTTIME@@
Last event: @@ENDTIME@@


@@MODULE_REPORTS@@

Unparsed Strings:

@@UNPARSED_STRINGS@@

Brought to you by @@VERSION@@

epylog/etc/modules.d/0000755000175000017500000000000012527655413014024 5ustar tiagotiagoepylog/etc/modules.d/logins.conf.in0000644000175000017500000000232712527655413016577 0ustar tiagotiago[module] desc = Logins exec = %%MODULES_DIR%%/logins_mod.py files = /var/log/messages[.#], /var/log/secure[.#] enabled = yes internal = yes outhtml = yes priority = 0 [conf] ## # Only enable things useful for your configuration to speed things # up. The more stuff you enable, the slower matching will be. # enable_pam = 1 enable_xinetd = 1 enable_sshd = 1 enable_uw_imap = 0 enable_dovecot = 0 enable_courier = 0 enable_imp = 0 enable_proftpd = 0 ## # This is a fun setting. You can list domains that are "safe" here. # E.g. if your org's domain is example.com and you generally don't # expect logins from hosts in example.com domain to be suspicious, you # can add "example.com$" as a safe domain. This way anyone logging in from # a remote host not matching *.example.com will be flagged in red and the # full hostname of the connecting machine will be printed in the report. # List multiple values separated by comma. # E.g.: safe_domains = example.com$, foo.edu$ # The default is .*, meaning all domains are considered safe. To turn # this off specify something like: # safe_domains = !.* safe_domains = .* ## # If you have too many systems, wide-scale probing may turn ugly. This # will collapse the reports. systems_collapse = 10 epylog/etc/modules.d/spamd.conf.in0000644000175000017500000000137712527655413016414 0ustar tiagotiago[module] desc = Spamassassin exec = %%MODULES_DIR%%/spamd_mod.py files = /var/log/maillog[.#] enabled = no internal = yes outhtml = yes priority = 7 [conf] ## # Report this many "top ranking users" # report_top = 10 ## # Consider this the spam threshold when reporting the scores. # Anything above this will be flagged as spam. The last column shows # the score and then non-spam/spam in the parenthesis. E.g.: # -1.3 (10/3) -- the mean score is -1.3, 10 messages under spam_threshold, # and 3 messages over it. # spam_threshold = 5 ## # Rank the top users according to this parameter. 
Valid entries are: # "most spammed" -- sorts by users with the topmost score # "most messages" -- sorts by users who received most messages # sort_by = most spammed epylog/etc/modules.d/Makefile.in0000644000175000017500000000300212527655413016064 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ INSTALLDIRS = $(pkgconfdir)/modules.d MCONFS = mail packets weeder logins notices spamd all: all-stamp all-stamp: for MCONF in $(MCONFS); do \ sed -e 's|%%pkgconfdir%%|$(pkgconfdir)|g; s|%%MODULES_DIR%%|$(MODULES_DIR)|g' $$MCONF.conf.in > $$MCONF.conf; \ done touch all-stamp install: all installdirs for MCONF in $(MCONFS); do \ $(INSTALL_DATA) $$MCONF.conf $(DESTDIR)$(pkgconfdir)/modules.d/; \ done uninstall: for MCONF in $(MCONFS); do \ $(RM) $(pkgconfdir)/modules.d/$$MCONF.conf; \ done rmdir $(pkgconfdir)/modules.d clean: for MCONF in $(MCONFS); do \ $(RM) $$MCONF.conf; \ done $(RM) all-stamp distclean: clean $(RM) Makefile installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/etc/modules.d/weeder.conf.in0000644000175000017500000000154212527655413016555 0ustar tiagotiago[module] desc = Weedeater exec = %%MODULES_DIR%%/weeder_mod.py files = /var/log/messages[.#], /var/log/secure[.#], /var/log/maillog[.#] enabled = yes internal = yes outhtml = yes priority = 10 [conf] ## # Where to look for a weed_dist.cf file. # weed_dist = %%pkgconfdir%%/weed_dist.cf ## # Where to look for a weed_local.cf file # weed_local = %%pkgconfdir%%/weed_local.cf ## # This is where it gets interesting. # If you look into weed_dist.cf, you will notice that the entries # are listed by section titles. List here only the sections that are # relevant to your setup to speed things up. The more sections you # enable, the slower matching will go, as it has to try more # regexes. Note that [ADD] and [REMOVE] sections in weed_local are # special, any other sections in that file will be ignored. # # You can use 'ALL' to enable all sections. # enable = ALL epylog/etc/modules.d/notices.conf.in0000644000175000017500000000132312527655413016743 0ustar tiagotiago[module] desc = Notices exec = %%MODULES_DIR%%/notices_mod.py files = /var/log/messages[.#], /var/log/secure[.#], /var/log/maillog[.#] enabled = yes internal = yes outhtml = yes priority = 7 [conf] ## # Where is your notice_dist.xml file? # notice_dist = %%pkgconfdir%%/notice_dist.xml ## # Add your own notices into notice_local.xml, not into notice_dist.xml! # This way you don't risk missing future revisions to notice_dist.xml # notice_local = %%pkgconfdir%%/notice_local.xml ## # You can list the ids of members from notice_dist.xml here # namely, or you can use ALL to enable all of them. There is no need # to add members from notice_local.xml here -- they will be enabled # automatically. 
# enable = ALL epylog/etc/modules.d/packets.conf.in0000644000175000017500000000134712527655413016737 0ustar tiagotiago[module] desc = Packet Filter exec = %%MODULES_DIR%%/packets_mod.py files = /var/log/messages[.#] enabled = yes internal = yes outhtml = yes priority = 1 [conf] ## # Where to look for the trojans list. # trojan_list = %%pkgconfdir%%/trojans.list ## # If a remote host hits this many systems, then don't list them namely, # but collapse them into a nice report, e.g.: [50 hosts] # systems_collapse = 5 ## # Useful for massive portscans. Don't list all the ports namely, but # present them in a collapsed view. E.g.: [50 ports] ports_collapse = 5 ## # Enable iptables, ipchains, ipfilter, or all three. # enable_iptables = 1 enable_ipchains = 1 enable_ipfilter = 0 ## # Sort by any of the following: packets, source, system, port # sortby=port epylog/etc/modules.d/mail.conf.in0000644000175000017500000000047712527655413016232 0ustar tiagotiago[module] desc = Mail Report exec = %%MODULES_DIR%%/mail_mod.py files = /var/log/maillog[.#] enabled = yes internal = yes outhtml = yes priority = 5 [conf] ## # Enable sendmail, postfix, or both # enable_sendmail = 1 enable_postfix = 1 enable_qmail = 0 ## # Report at most this many "top things" # top_report_limit = 5 epylog/etc/weed_local.cf0000644000175000017500000000021112527655413014534 0ustar tiagotiago[ADD] ## # Here is where you add your own rules # [REMOVE] ## # Here is where you put the rules (VERBATIM) from the weed_dist.cf file # epylog/etc/epylog.conf.in0000644000175000017500000000225512527655413014711 0ustar tiagotiago## # Main Epylog configuration file. See epylog.conf(5) for more info. # [main] cfgdir = %%pkgconfdir%% tmpdir = %%TEMP_DIR%% vardir = %%pkgvardir%% [report] title = @@HOSTNAME@@ system events: @@LOCALTIME@@ template = %%pkgconfdir%%/report_template.html include_unparsed = yes publishers = mail [mail] method = mail smtpserv = /usr/sbin/sendmail -t mailto = root format = html lynx = %%LYNX_BIN%% include_rawlogs = no rawlogs_limit = 200 ## # GPG encryption requires pygpgme installed # gpg_encrypt = no # If gpg_keyringdir is omitted, we'll use the default ~/.gnupg for the # user running epylog (/root/.gnupg, usually). #gpg_keyringdir = %%pkgconfdir%%/gpg/ # List key ids, can be emails or fingerprints. If omitted, we'll # encrypt to all keys found in the pubring. #gpg_recipients = admin1@example.com, admin2@example.com # List key ids that we should use to sign the report. # If omitted, the report will not be signed, only encrypted. 
#gpg_signers = epylog@logserv.example.com [file] method = file path = /var/www/html/epylog dirmask = %Y-%b-%d_%a filemask = %H%M save_rawlogs = no expire_in = 7 notify = root@localhost smtpserv = /usr/sbin/sendmail -t pubroot = http://localhost/epylog epylog/etc/Makefile.in0000644000175000017500000000362612527655413014206 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ SUBDIRS = modules.d INSTALLDIRS = $(pkgconfdir) CFGFILES = epylog.conf notice_dist.xml notice_local.xml report_template.html \ trojans.list weed_dist.cf weed_local.cf all: all-stamp all-stamp: for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir all ; \ done sed -e 's|%%pkgconfdir%%|$(pkgconfdir)|g; s|%%TEMP_DIR%%|$(TEMP_DIR)|g; s|%%pkgvardir%%|$(pkgvardir)|g; s|%%LYNX_BIN%%|$(LYNX_BIN)|g' \ epylog.conf.in > epylog.conf touch all-stamp install: all installdirs for CFGFILE in $(CFGFILES); do \ $(INSTALL_DATA) $$CFGFILE $(DESTDIR)$(pkgconfdir)/$$CFGFILE; \ done for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir install ; \ done uninstall: for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir uninstall ; \ done for CFGFILE in $(CFGFILES); do \ $(RM) $(pkgconfdir)/$$CFGFILE; \ done rmdir $(pkgconfdir) clean: $(RM) epylog.conf for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir clean ; \ done $(RM) all-stamp distclean: clean $(RM) Makefile for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir distclean ; \ done installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/etc/notice_local.xml0000644000175000017500000000077612527655413015321 0ustar tiagotiago epylog/etc/weed_dist.cf0000644000175000017500000001036112527655413014414 0ustar tiagotiago## # NOTE: # Editing this file is not recommended. If you do, you might miss newer # revisions of this list in the future versions. # See weed_local.cf for instructions on how to add or delete rules. 
# [pam] \(pam_unix\).*: session closed for \(pam_unix\).*: check pass; [dhcpd] dhcpd: DHCPREQUEST dhcpd: DHCPACK dhcpd: DHCPDISCOVER dhcpd: DHCPOFFER dhcpd: DHCPRELEASE dhcpd: DHCPINFORM [rpc] rpc.mountd: authenticated mount request from rpc.mountd: authenticated unmount request rpc.statd.*: Version .* Starting rpc.statd.*: Caught signal 15, un-registering and exiting [automount] automount.*: expired automount.*: attempting to mount entry automount.*: lookup\(file\): .* failed automount.*: starting automounter automount.*: using kernel protocol automount.*: shutting down automount.*: .* No such key in map [crond] CROND.*: \(mailman\) CMD \(/usr/bin/python CROND.*: \(root\) CMD \(.*/sbin/rmmod -as\) CROND.*: \(root\) CMD \(/usr/lib/sa/sa\d CROND.*: \(root\) CMD \(run-parts anacron.*: Updated timestamp for job [bind] named.*: lame server resolving named.*: .* NS points to CNAME named.*: Response from unexpected source named.*: .* All possible A RR's lame named.*: bad referral named.*: Cleaned cache named.*: USAGE named.*: NSTATS named.*: XSTATS named.*: .* points to a CNAME named.*: denied update from named.*: .* Bogus LOOPBACK [gnome] gnome-name-server.*: input condition is: gnome-name-server.*: name server starting gnome-name-server.*: starting gnome-name-server.*: name server was running gconfd.*: Resolved address gconfd.*: GConf server is not in use gconfd.*: Exiting gconfd.*: starting gconfd.*: .* shutting down cleanly gdm.*: Couldn't authenticate user xscreensaver.*: FAILED LOGIN [sshd] sshd.*: Generating new .* key. sshd.*: .* key generation complete sshd.*: Connection closed sshd.*: Could not reverse map address sshd.*: Received disconnect from sshd.*: error: Could not get shadow information for sshd.*: Invalid user .* from [xinetd] xinetd.*: .* Transport endpoint is not connected xinetd.*: EXIT: [uw-imap] imapd.*: AUTHENTICATE imapd.*: Logout imapd.*: Killed imapd.*: imap.*service init imapd.*: Command stream end of file imapd.*: Autologout imapd.*: Connection reset by peer ipop3d.*: AUTHENTICATE ipop3d.*: Logout ipop3d.*: Killed ipop3d.*: Autologout ipop3d.*: pop3.*service init [courier-imap] imapd.*: Connection, ip=\[\S+\] imapd.*: LOGOUT, user=\S+, ip=\[\S+\] imapd.*: Disconnected, ip=\[\S+\] imapd.*: DISCONNECTED, user=\S+, ip=\[\S+\] imapd.*: LOGOUT, ip=\[\S+\] pop3d.*: Connection, ip=\[\S+\] pop3d.*: LOGOUT, user=\S+, ip=\[\S+\] pop3d.*: Disconnected, ip=\[\S+\] pop3d.*: DISCONNECTED, user=\S+, ip=\[\S+\] pop3d.*: LOGOUT, ip=\[\S+\] [postfix] postfix/smtp\[\d+\]: connect to postfix/smtp\[\d+\]: warning: no MX host postfix/smtp\[\d+\]: warning: numeric domain name in resource data postfix/smtp\[\d+\]: warning: host .* with my own hostname postfix/smtpd.*: connect from postfix/smtpd.*: disconnect from postfix/smtpd.*: TLS connection established postfix/smtpd.*: lost connection postfix/cleanup postfix/pickup [sendmail] sendmail\[.*:.*NOQUEUE: Null connection from sendmail\[.*:.*timeout waiting for input [qmail] qmail:.* new msg qmail:.* end msg qmail:.* status: [spamd] spamd\[.*: info: spamd\[.*: processing message spamd\[.*: checking message spamd\[.*: connection from spamd\[.*: Creating default_prefs [printer] printer: ready to print printer: status change printer: printing printer: peripheral low-power state [pumpd] pumpd.*: renewed lease for interface pumpd.*: configured interface [afpd] afpd.*: ASIP session: afpd.*: afp_flushfork: afpd.*: .*B read,.*B written [ntpd] ntpd.*: kernel time discipline status change [kernel] kernel: application .* uses obsolete OSS audio interface 
kernel: SELinux: initialized kernel: device .* left promiscuous mode kernel: .*: disabled promiscuous mode usb-uhci.c: interrupt, status PCI: Found IRQ PCI: Sharing IRQ PCI: Setting latency timer kernel: agpgart: Found kernel: agpgart: Putting [misc] modprobe: Can't locate module logger: punching nameserver .* through the firewall HORDE\[\S*\s*\[imp\] Logout LOGIN ON tty. dhclient: DHCPREQUEST dhclient: DHCPACK dhclient: DHCPDISCOVER dhclient: bound to dbus: avc: .* buckets used [systemd] systemd-logind\[\d+\]: Removed session \d+\. ## $Revision$ ## epylog/etc/trojans.list0000644000175000017500000002023212527655413014506 0ustar tiagotiago1/udp Sockets des Troie 2/tcp Death 30/tcp Agent 40421 31/tcp Agent 31 41/tcp Deep Throat 48/tcp DRAT 58/tcp DMSetup 59/tcp DMSetup 79/tcp CDK 81/tcp RemoConChubo 99/tcp Hidden Port 121/tcp Attack Bot 133/tcp Farnaz 142/tcp NetTaxi 146/tcp Infector 146/udp Infector 170/tcp A-trojan 334/tcp Backage 411/tcp Backage 420/tcp Breach 421/tcp TCP Wrappers trojan 455/tcp Fatal Connections 456/tcp Hackers Paradise 513/tcp Grlogin 514/tcp RPC Backdoor 531/tcp Net666, Rasmin 555/tcp Seven-Eleven 605/tcp Secret Service 666/tcp The Ripperz 667/tcp SniperNet 669/tcp DP trojan 692/tcp GayOL 777/tcp AimSpy 808/tcp WinHole 911/tcp Dark Shadow 999/tcp Deep Throat 1000/tcp Der Spaeher 1001/tcp Der Spaeher 1010/tcp Doly Trojan 1011/tcp Doly Trojan 1012/tcp Doly Trojan 1015/tcp Doly Trojan 1016/tcp Doly Trojan 1020/tcp Vampire 1024/tcp NetSpy 1025/tcp Remote Storm 1025/udp Remote Storm 1035/tcp Multidropper 1042/tcp BLA trojan 1045/tcp Rasmin 1049/tcp /sbin/initd 1050/tcp MiniCommand 1053/tcp The Thief 1054/tcp AckCmd 1080/tcp WinHole 1081/tcp WinHole 1082/tcp WinHole 1083/tcp WinHole 1090/tcp Xtreme 1095/tcp RAT 1097/tcp RAT 1098/tcp RAT 1099/tcp RAT 1150/tcp Orion 1151/tcp Orion 1170/tcp PSS 1200/udp NoBackO 1201/udp NoBackO 1207/tcp SoftWAR 1208/tcp Infector 1212/tcp Kaos 1234/tcp SubSeven 1243/tcp BackDoor-G 1245/tcp VooDoo Doll 1255/tcp Scarab 1256/tcp Project nEXT 1269/tcp Matrix 1272/tcp The Matrix 1313/tcp NETrojan 1338/tcp Millenium Worm 1349/tcp Bo dll 1394/tcp GoFriller 1441/tcp Remote Storm 1492/tcp FTP99CMP 1524/tcp Trinoo 1568/tcp Remote Hack 1600/tcp Shivka-Burka 1703/tcp Exploiter 1777/tcp Scarab 1807/tcp SpySender 1966/tcp Fake FTP 1967/tcp WM FTP Server 1969/tcp OpC BO 1981/tcp Bowl, Shockrave 1999/tcp SubSeven 2000/tcp Der Spaeher 2001/tcp Der Späher 2023/tcp Ripper Pro 2080/tcp WinHole 2115/tcp Bugs 2130/udp Mini Backlash 2140/tcp The Invasor 2140/udp Deep Throat 2155/tcp Illusion Mailer 2255/tcp Nirvana 2283/tcp Hvl RAT 2300/tcp Xplorer 2311/tcp Studio 54 2330/tcp Contact 2331/tcp Contact 2332/tcp Contact 2333/tcp Contact 2334/tcp Contact 2335/tcp Contact 2336/tcp Contact 2337/tcp Contact 2338/tcp Contact 2339/tcp Contact 2339/udp Voice Spy 2345/tcp Doly Trojan 2565/tcp Striker trojan 2583/tcp WinCrash 2600/tcp Digital RootBeer 2716/tcp The Prayer 2773/tcp SubSeven 2774/tcp SubSeven 2801/tcp Phineas Phucker 2989/udp RAT 3000/tcp Remote Shut 3024/tcp WinCrash 3031/tcp Microspy 3128/tcp Ringzero 3129/tcp Masters Paradise 3150/tcp The Invasor 3150/udp Deep Throat 3456/tcp Terror trojan 3459/tcp Eclipse 2000 3700/tcp Portal of Doom 3777/tcp PsychWard 3791/tcp Total Solar Eclypse 3801/tcp Total Solar Eclypse 4000/tcp SkyDance 4092/tcp WinCrash 4242/tcp VHM 4321/tcp BoBo 4444/tcp Prosiak 4567/tcp File Nail 4590/tcp ICQ Trojan 4950/tcp ICQ Trogen (Lm) 5000/tcp Back Door Setup 5001/tcp Back Door Setup 5002/tcp cd00r 5010/tcp Solo 5011/tcp OOTLT 5025/tcp WM Remote 
KeyLogger 5031/tcp Net Metropolitan 5032/tcp Net Metropolitan 5321/tcp Firehotcker 5333/tcp Backage 5343/tcp wCrat 5400/tcp Back Construction 5401/tcp Back Construction 5402/tcp Back Construction 5512/tcp Illusion Mailer 5534/tcp The Flu 5550/tcp Xtcp 5555/tcp ServeMe 5556/tcp BO Facil 5557/tcp BO Facil 5569/tcp Robo-Hack 5637/tcp PC Crasher 5638/tcp PC Crasher 5742/tcp WinCrash 5760/tcp Portmap Remote Root Linux Exploit 5880/tcp Y3K RAT 5882/tcp Y3K RAT 5882/udp Y3K RAT 5888/tcp Y3K RAT 5888/udp Y3K RAT 5889/tcp Y3K RAT 6000/tcp The Thing 6006/tcp Bad Blood 6272/tcp Secret Service 6400/tcp The Thing 6661/tcp TEMan 6666/tcp Dark Connection Inside 6667/tcp Dark FTP 6669/tcp Host Control, Vampire 6670/tcp BackWeb Server 6711/tcp BackDoor-G 6712/tcp Funny trojan 6713/tcp SubSeven 6723/tcp Mstream 6771/tcp Deep Throat 6776/tcp 2000 Cracks 6838/udp Mstream 6883/tcp Delta Source DarkStar (??) 6912/tcp Shit Heep 6939/tcp Indoctrination 6969/tcp GateCrasher 6970/tcp GateCrasher 7000/tcp Exploit Translation Server 7001/tcp Freak88 7215/tcp SubSeven 7300/tcp NetMonitor 7301/tcp NetMonitor 7306/tcp NetMonitor 7307/tcp NetMonitor 7308/tcp NetMonitor 7424/tcp Host Control 7424/udp Host Control 7597/tcp Qaz 7626/tcp Glacier 7777/tcp God Message, Tini 7789/tcp Back Door Setup, ICKiller 7891/tcp The ReVeNgEr 7983/tcp Mstream 8080/tcp Brown Orifice 8787/tcp Back Orifice 2000 8988/tcp BacHack 8989/tcp Rcon 9000/tcp Netministrator 9325/udp Mstream 9400/tcp InCommand 9872/tcp Portal of Doom 9873/tcp Portal of Doom 9874/tcp Portal of Doom 9875/tcp Portal of Doom 9876/tcp Cyber Attacker, Rux 9878/tcp TransScout 9989/tcp Ini-Killer 9999/tcp The Prayer 10000/tcp OpwinTRojan 10005/tcp OpwinTRojan 10067/udp Portal of Doom 10085/tcp Syphillis 10086/tcp Syphillis 10100/tcp Control Total 10101/tcp BrainSpy 10167/udp Portal of Doom 10520/tcp Acid Shivers 10528/tcp Host Control 10607/tcp Coma 10666/udp Ambush 11000/tcp Senna Spy Trojan Generator 11050/tcp Host Control 11051/tcp Host Control 11223/tcp Progenic trojan 12076/tcp Gjamer 12223/tcp Hack-99 KeyLogger 12345/tcp Ashley 12346/tcp Fat Bitch 12349/tcp BioNet 12361/tcp Whack-a-mole 12362/tcp Whack-a-mole 12363/tcp Whack-a-mole 12623/udp DUN Control 12624/tcp ButtMan 12631/tcp Whack Job 12754/tcp Mstream 13000/tcp Senna Spy 13010/tcp Hacker Brasil - HBR 13013/tcp PsychWard 13014/tcp PsychWard 13223/tcp Hack´99 KeyLogger 13473/tcp Chupacabra 14500/tcp PC Invader 14501/tcp PC Invader 14502/tcp PC Invader 14503/tcp PC Invader 15000/tcp NetDemon 15092/tcp Host Control 15104/tcp Mstream 15382/tcp SubZero 15858/tcp CDK 16484/tcp Mosucker 16660/tcp Stacheldraht 16772/tcp ICQ Revenge 16959/tcp SubSeven 16969/tcp Priority 17166/tcp Mosaic 17300/tcp Kuang2 the virus 17449/tcp Kid Terror 17499/tcp CrazzyNet 17500/tcp CrazzyNet 17569/tcp Infector 17593/tcp Audiodoor 17777/tcp Nephron 18753/udp Shaft 19864/tcp ICQ Revenge 20000/tcp Millenium 20001/tcp Millenium 20002/tcp AcidkoR 20005/tcp Mosucker 20023/tcp VP Killer 20034/tcp NetBus 20203/tcp Chupacabra 20331/tcp BLA trojan 20432/tcp Shaft 20433/udp Shaft 21544/tcp GirlFriend, Kid Terror 21554/tcp Exploiter 22222/tcp Donald Dick 23005/tcp NetTrash 23006/tcp NetTrash 23023/tcp Logged 23032/tcp Amanda 23432/tcp Asylum 23456/tcp Evil FTP 23476/tcp Donald Dick 23476/udp Donald Dick 23477/tcp Donald Dick 23777/tcp InetSpy 24000/tcp Infector 25685/tcp Moonpie 25686/tcp Moonpie 25982/tcp Moonpie 26274/udp Delta Source 26681/tcp Voice Spy 27374/tcp Bad Blood 27444/udp Trinoo 27573/tcp SubSeven 27665/tcp Trinoo 28678/tcp Exploiter 
29104/tcp NetTrojan 29369/tcp ovasOn 29891/tcp The Unexplained 30000/tcp Infector 30001/tcp ErrOr32 30003/tcp Lamers Death 30029/tcp AOL trojan 30100/tcp NetSphere 30101/tcp NetSphere 30102/tcp NetSphere 30103/tcp NetSphere 30103/udp NetSphere 30133/tcp NetSphere 30303/tcp Sockets des Troie 30947/tcp Intruse 30999/tcp Kuang2 31335/tcp Trinoo 31336/tcp Bo Whack, Butt Funnel 31337/tcp Back Fire 31337/udp Back Orifice 31338/tcp Back Orifice 31338/udp Deep BO 31339/tcp NetSpy (DK) 31666/tcp BOWhack 31785/tcp Hack´a´Tack 31787/tcp Hack´a´Tack 31788/tcp Hack´a´Tack 31789/udp Hack´a´Tack 31790/tcp Hack´a´Tack 31791/udp Hack´a´Tack 31792/tcp Hack´a´Tack 32001/tcp Donald Dick 32100/tcp Peanut Brittle 32418/tcp Acid Battery 33270/tcp Trinity 33333/tcp Blakharaz 33577/tcp Son of PsychWard 33777/tcp Son of PsychWard 33911/tcp Spirit 2000 34324/tcp Big Gluck 34444/tcp Donald Dick 34555/udp Trinoo 35555/udp Trinoo 37237/tcp Mantis 37651/tcp Yet Another Trojan - YAT 40412/tcp The Spy 40421/tcp Agent 40421 40422/tcp Masters Paradise 40423/tcp Masters Paradise 40425/tcp Masters Paradise 40426/tcp Masters Paradise 41337/tcp Storm 41666/tcp RBT 44444/tcp Prosiak 44575/tcp Exploiter 47262/udp Delta Source 49301/tcp OnLine KeyLogger 50130/tcp Enterprise 50505/tcp Sockets des Troie 50766/tcp Fore 51966/tcp Cafeini 52317/tcp Acid Battery 2000 53001/tcp RWS 54283/tcp SubSeven 54320/tcp Back Orifice 2000 54321/tcp Back Orifice 2000 55165/tcp File Manager trojan 55166/tcp WM Trojan Generator 57341/tcp NetRaider 58339/tcp Butt Funnel 60000/tcp Deep Throat 60001/tcp Trinity 60068/tcp Xzip 6000068 60411/tcp Connection 61348/tcp Bunker-Hill 61466/tcp TeleCommando 61603/tcp Bunker-Hill 63485/tcp Bunker-Hill 64101/tcp Taskman 65000/tcp Devil 65390/tcp Eclypse 65421/tcp Jade 65432/tcp The Traitor (= th3tr41t0r) 65432/udp The Traitor (= th3tr41t0r) 65534/tcp /sbin/initd 65535/tcp RC1 trojan epylog/etc/notice_dist.xml0000644000175000017500000000636312527655413015170 0ustar tiagotiago gconfd.*: Failed to get lock.*Failed to create gconfd.*: Error releasing lockfile gconfd.*: .* Could not lock temporary file gconfd.*: .* another process has the lock GConf locking errors Fatal X error Fatal X errors sftp-server.* subsystem request for sftp SFTP activity floppy0:|\(floppy\) Misc floppy errors ypserv.*:\srefused\sconnect\sfrom\s(\S+):\d+\sto\sprocedure\s(\S+) %s denied for %s kernel:\sLinux\sversion\s(\S*) Rebooted with Linux kernel %s sshd\[\S*: Did not receive identification string from (\S*) SSH scan from %s VFS: busy inodes on changed media dirty CDROM mount kernel: cdrom: This disc doesn kernel: .*Make sure there is a disc in the drive. 
Misc CDROM errors attempt to access beyond end of device rw=\d+, want=\d+, limit=\d+ Directory sread .* failed kernel: bread in fat_access failed Dirty floppy mount [non-indicative] nfs: server (\S+) not responding nfs: server (\S+) OK NFS timeouts to server %s insmod: Hint: insmod errors insmod errors audit\S+:\s+avc:\s+denied\s+\{\s([^\}]+)\s\}.*exe=(\S+).*scontext=(\S+) SELinux: denied "%s" for "%s" (scontext=%s) CROND\S+: \((\S+)\) CMD \(([^\)]+)\) crond\S+: \((\S+)\) CMD \(([^\)]+)\) Cron: user '%s' (%s) device (\S+) entered promiscuous mode (\S+): Promiscuous mode enabled (\S+): enabled promiscuous mode device %s entered promiscuous mode epylog/epylog.in0000644000175000017500000002320212527655413013205 0ustar tiagotiago#!%%PYTHON_BIN%% ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import os import sys import getopt import time import libxml2 sys.path.insert(0, '%%PY_MODULE_DIR%%') from epylog import * DEFAULT_EPYLOG_CONFIG = '%%pkgconfdir%%/epylog.conf' EPYLOG_PIDFILE = '%%localstatedir%%/run/epylog.pid' def unxmlify_offsets(ofile, logger): """ Take the XML file with offsets and return them as a dictionary. """ logger.put(5, '>epylog.unxmlify_offsets') logger.put(3, 'Checking if we can read "%s"' % ofile) if not os.access(ofile, os.R_OK): logger.put(3, 'Could not read offsets file "%s"' % ofile) logger.put(3, 'Returning blank tuple') logger.put(5, 'epylog.xmlify_offsets') try: logger.put(3, 'Trying to open "%s" for writing.' % ofile) fh = open(ofile, 'w') except IOError: logger.put(0, 'Could not open "%s" for writing! 
Offsets not saved!') return logger.puthang(3, 'Making XML out of offset map') doc = libxml2.newDoc('1.0') root = doc.newChild(None, 'epylog-offsets', None) for entry in omap: enode = root.newChild(None, 'entry', None) enode.newChild(None, 'log', entry[0]) enode.newChild(None, 'inode', str(entry[1])) enode.newChild(None, 'offset', str(entry[2])) logger.endhang(3) offsets = doc.serialize() doc.freeDoc() logger.put(5, offsets) import fcntl logger.put(3, 'Locking the offsets file') fcntl.flock(fh.fileno(), fcntl.LOCK_EX) logger.puthang(3, 'Writing the offsets into "%s"' % ofile) fh.write(offsets) logger.endhang(3) logger.put(3, 'Unlocking the offsets file') fcntl.flock(fh.fileno(), fcntl.LOCK_UN) fh.close() logger.put(5, 'epylog.restore_offsets') ofile = os.path.join(epylog.vardir, 'offsets.xml') omap = unxmlify_offsets(ofile, logger) for o in omap: try: epylog.logtracker.set_start_offset_by_entry(o[0], o[1], o[2]) except NoSuchLogError: logger.put(0, 'No such log in tracker: %s' % o[0]) logger.put(5, 'epylog.store_offsets') ofile = os.path.join(epylog.vardir, 'offsets.xml') omap = epylog.logtracker.get_offset_map() xmlify_offsets(omap, ofile, logger) logger.put(5, ' Copyright (C) This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA Also add information on how to contact you by electronic and paper mail. If the program is interactive, make it output a short notice like this when it starts in an interactive mode: Gnomovision version 69, Copyright (C) year name of author Gnomovision comes with ABSOLUTELY NO WARRANTY; for details type `show w'. This is free software, and you are welcome to redistribute it under certain conditions; type `show c' for details. The hypothetical commands `show w' and `show c' should show the appropriate parts of the General Public License. Of course, the commands you use may be called something other than `show w' and `show c'; they could even be mouse-clicks or menu items--whatever suits your program. You should also get your employer (if you work as a programmer) or your school, if any, to sign a "copyright disclaimer" for the program, if necessary. Here is a sample; alter the names: Yoyodyne, Inc., hereby disclaims all copyright interest in the program `Gnomovision' (which makes passes at compilers) written by James Hacker. , 1 April 1989 Ty Coon, President of Vice This General Public License does not permit incorporating your program into proprietary programs. If your program is a subroutine library, you may consider it more useful to permit linking proprietary applications with the library. If this is what you want to do, use the GNU Library General Public License instead of this License. 
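A quick sketch of the offset bookkeeping used by epylog.in above: the offset map passed to xmlify_offsets() is a sequence of (logfile, inode, offset) entries, and the element names written to offsets.xml come straight from the serialization code. The paths and numbers below are made up for illustration:

    # Shape of the data handled by xmlify_offsets()/unxmlify_offsets();
    # values are illustrative only.
    omap = [
        ('/var/log/messages', 1313379, 102400),
        ('/var/log/secure', 1313380, 2048),
    ]
    # xmlify_offsets(omap, ofile, logger) serializes this to roughly:
    #
    #   <epylog-offsets>
    #     <entry>
    #       <log>/var/log/messages</log>
    #       <inode>1313379</inode>
    #       <offset>102400</offset>
    #     </entry>
    #     ...
    #   </epylog-offsets>
    #
    # unxmlify_offsets() reads the same structure back, so restore_offsets()
    # can call epylog.logtracker.set_start_offset_by_entry(log, inode, offset)
    # for each entry.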
epylog/compiledir.in0000644000175000017500000000047412527655413014043 0ustar tiagotiago#!@PYTHON_BIN@ -tt import sys, os.path from compileall import compile_dir if __name__ == '__main__': if len(sys.argv) < 2: sys.exit(0) dirs = sys.argv[1:] for d in dirs: if not os.path.isdir(d): print '%s is not a dir. Throwing away.' % d continue compile_dir(d) epylog/cron/0000755000175000017500000000000012527655413012320 5ustar tiagotiagoepylog/cron/epylog.cron.in0000644000175000017500000000010312527655413015101 0ustar tiagotiago#!/bin/sh # Run epylog daily. # # $Id$ # %%sbindir%%/epylog --cron epylog/cron/Makefile.in0000644000175000017500000000260012527655413014363 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PYTHON_BIN = @PYTHON_BIN@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ INSTALLDIRS = $(CRON_DIR) CRONFILES = epylog.cron all: all-stamp all-stamp: for CRONFILE in $(CRONFILES); do \ sed 's|%%sbindir%%|$(sbindir)|g' $$CRONFILE.in > $$CRONFILE; \ done touch all-stamp install: all installdirs for CRONFILE in $(CRONFILES); do \ $(INSTALL_SCRIPT) $$CRONFILE $(DESTDIR)$(CRON_DIR)/$$CRONFILE; \ done uninstall: for CRONFILE in $(CRONFILES); do \ $(RM) $(CRON_DIR)/$$CRONFILE; \ done clean: $(RM) $(CRONFILES) $(RM) all-stamp distclean: clean $(RM) Makefile installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/modules/0000755000175000017500000000000012527660252013024 5ustar tiagotiagoepylog/modules/spamd_mod.py0000644000175000017500000001320012527655413015340 0ustar tiagotiago#!/usr/bin/python -tt """ Description will eventually go here. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys import re ## # This is for testing purposes, so you can invoke this from the # modules directory. See also the testing notes at the end of the # file. 
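## For illustration: each key in the regex_map built in __init__ below is a
## compiled regex, and its value is the handler method used for lines that
## match it; a handler returns a dict keyed by a result tuple and valued by
## the line multiplier. A spamd syslog line of roughly this shape (the example
## line is assumed, not taken from a real log):
##
##   spamd[1234]: clean message (2.1/5.0) for jdoe:500 in 4.9 seconds, 12288 bytes.
##
## is picked apart by self.spamd_re into score=2.1, user='jdoe', sec=4.9 and
## size=12288, and the spamd() handler below returns
## {('jdoe', 2.1, 4.9, 12288): multiplier}.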
# sys.path.insert(0, '../py/') from epylog import InternalModule, Result class spamd_mod(InternalModule): def __init__(self, opts, logger): InternalModule.__init__(self) self.logger = logger rc = re.compile self.regex_map = { rc('spamd\[\d+\]: clean message'): self.spamd, rc('spamd\[\d+\]: identified spam'): self.spamd } self.top = int(opts.get('report_top', '10')) self.thold = int(opts.get('spam_threshold', '5')) sort_by = opts.get('sort_by', 'most spammed') if sort_by == 'most spammed': self.sort = 'spammed' else: self.sort = 'messages' self.spamd_re = rc('\s\((.*?)/.*for (\S*?):.*in (\S*).*, (\d+) bytes') self.report_wrap = '%s
\n' self.subreport_wrap = '

%s

\n' self.total_title = 'Total stats' self.users_title = 'Top %d ranking users' % self.top self.score_rep = '%.1f (%d/%d)' self.report_line = '%s%s%s%s\n' self.flip = ' bgcolor="#dddddd"' ## # Line-matching routines # def spamd(self, linemap): sys, msg, mult = self.get_smm(linemap) try: score, user, sec, size = self.spamd_re.search(msg).groups() except: logger.put(0, 'Odd spamd line: %s' % msg) return None score = float(score) sec = float(sec) size = int(size) return {(user, score, sec, size): mult} def _mk_score(self, msgs, score, score1, score2): avg_score = float(score/msgs) ret = self.score_rep % (avg_score, score1, score2) return ret def _mk_time_unit(self, secs): mins = int(secs/60) if mins: hrs = int(mins/60) if hrs: days = int(hrs/24) if days: return (days, 'd') return (hrs, 'hr') return (mins, 'min') return (secs, 'sec') def finalize(self, rs): user_rep = '' users = rs.get_distinct(()) t_msgs = 0 t_score_t = 0 t_thold_lt = 0 t_thold_gt = 0 t_secs = 0 t_size = 0 urs = Result() for user in users: submap = rs.get_submap((user,)) msgs = 0 score_t = 0 thold_lt = 0 thold_gt = 0 secs = 0 size = 0 while 1: try: entry, mult = submap.popitem() except KeyError: break msgs += mult score, sec, bytes = entry score_t += score * mult if score < self.thold: thold_lt += mult else: thold_gt += mult secs += sec * mult size += bytes * mult if msgs == 0: continue t_msgs += msgs t_score_t += score_t t_thold_lt += thold_lt t_thold_gt += thold_gt t_secs += secs t_size += size if self.sort == 'spammed': ctr = float(score/msgs) else: ctr = msgs urs.add_result({(user, msgs, score_t, thold_lt, thold_gt, secs, size): ctr}) report = '' report += self.subreport_wrap % self.total_title score = self._mk_score(t_msgs, t_score_t, t_thold_lt, t_thold_gt) time = '%d %s' % self._mk_time_unit(t_secs) size = '%d %s' % self.mk_size_unit(t_size) user = '%d users/%d msgs' % (len(users), t_msgs) report += self.report_line % (self.flip, user, size, time, score) report += self.subreport_wrap % self.users_title flipper = '' for avg, entry in urs.get_top(self.top): if flipper: flipper = '' else: flipper = self.flip user, msgs, score_t, thold_lt, thold_gt, secs, size = entry score = self._mk_score(msgs, score_t, thold_lt, thold_gt) time = '%d %s' % self._mk_time_unit(secs) size = '%d %s' % self.mk_size_unit(size) report += self.report_line % (flipper, user, size, time, score) report = self.report_wrap % report return report ## # This is useful when testing your module out. # Invoke without command-line parameters to learn about the proper # invocation. 
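## For illustration, the options consumed by __init__ above can be passed as a
## plain dict when experimenting with the module. The values here are made up;
## note that they arrive as strings and are converted with int() where needed:
##
##   opts = {'report_top': '20', 'spam_threshold': '5', 'sort_by': 'most spammed'}
##   mod = spamd_mod(opts, logger)
##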
# if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(spamd_mod, sys.argv) epylog/modules/Makefile.in0000644000175000017500000000250512527655413015076 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PYTHON_BIN = @PYTHON_BIN@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ INSTALLDIRS = $(MODULES_DIR) EPYMODS = mail_mod notices_mod spamd_mod logins_mod packets_mod weeder_mod all: $(COMPILEDIR_SCRIPT) . install: all installdirs for EPYMOD in $(EPYMODS); do \ $(INSTALL_DATA) $$EPYMOD.py $$EPYMOD.pyc \ $(DESTDIR)$(MODULES_DIR)/; \ done uninstall: for EPYMOD in $(EPYMODS); do \ $(RM) $(MODULES_DIR)/$$EPYMOD.py*; \ done rmdir $(pkgdatadir) clean: $(RM) *.pyc *.pyo distclean: clean $(RM) Makefile installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/modules/mail_mod.py0000644000175000017500000003561012527655413015167 0ustar tiagotiago#!/usr/bin/python -tt """ Description will eventually go here. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys import re ## # This is for testing purposes, so you can invoke this from the # modules directory. See also the testing notes at the end of the # file. 
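## Which MTA parsers are active is decided by string options read in __init__
## below: sendmail is on by default, postfix and qmail are off. For
## illustration, a postfix-only setup might pass something like this (the
## values are made up):
##
##   opts = {'enable_postfix': '1', 'enable_sendmail': '0',
##           'enable_qmail': '0', 'top_report_limit': '5'}
##   mod = mail_mod(opts, logger)
##
## Only the regex maps of the enabled MTAs are merged into self.regex_map, so
## lines from the other MTAs never reach this module.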
# sys.path.insert(0, '../py/') from epylog import Result, InternalModule class mail_mod(InternalModule): def __init__(self, opts, logger): InternalModule.__init__(self) self.logger = logger rc = re.compile postfix_map = { rc('postfix/smtpd\[\d+\]:\s\S*:'): self.postfix_smtpd, rc('postfix/n*qmgr\[\d+\]:\s\S*:'): self.postfix_qmgr, rc('postfix/local\[\d+\]:\s\S*:'): self.postfix_local, rc('postfix/smtp\[\d+\]:\s\S*:\sto='): self.postfix_smtp } sendmail_map = { rc('sendmail\['): self.sendmail } qmail_map = { rc('qmail:\s\d+.\d+\sinfo\smsg'): self.qmail_infomsg, rc('qmail:\s\d+.\d+\sstarting\sdelivery'): self.qmail_startdev, rc('qmail:\s\d+.\d+\sdelivery'): self.qmail_delivery, rc('qmail:\s\d+.\d+\sbounce\smsg\s\d+'): self.qmail_bounce } do_postfix = int(opts.get('enable_postfix', '0')) do_sendmail = int(opts.get('enable_sendmail', '1')) do_qmail = int(opts.get('enable_qmail', '0')) self.regex_map = {} if do_postfix: self.regex_map.update(postfix_map) if do_sendmail: self.regex_map.update(sendmail_map) if do_qmail: self.regex_map.update(qmail_map) self.toplim = int(opts.get('top_report_limit', '5')) self.postfix_ident_re = rc('\[\d+\]:\s*([A-Z0-9]*):') self.postfix_smtpd_re = rc('client=\S*\[(\S*)\]') self.postfix_qmgr_re = rc('from=(\S*),.*size=(\d*)') self.postfix_local_re = rc('to=(\S*),.*status=(\S*)\s\((.*)\)') self.postfix_smtp_re = rc('to=(\S*),.*status=(\S*)') self.sendmail_ident_re = rc('sendmail\[\d+\]:\s(.*?):') self.sendmail_fromline_re = rc('from=(.*?),.*size=(\d+),.*relay=(.*)') self.sendmail_ctladdr_re = rc('to=(\"\|.*?),\sctladdr=(\S+).*stat=(\w+)') self.sendmail_toline_re = rc('to=(.*?),.*stat=(\w+)') self.sendmail_from_re = rc('(<.*?>)') self.sendmail_relay_re = rc('(.*?)\s\[(\S*)\]') self.qmail_ident_re = rc('qmail:\s(\d+)') self.qmail_delid_re = rc('delivery\s(\d+):') self.qmail_infoline_re = rc('bytes\s(\d+)\sfrom\s(<.*?>)') self.qmail_startdev_re = rc('to\s\S+\s(\S+)') self.qmail_delivery_re = rc('delivery\s\d+:\s(\S+):') self.procmail_re = rc('/procmail') self.bounce = 0 self.success = 1 self.warning = 2 self.procmail = 3 self.delidref = 4 self.delidid = 5 self.report_wrap = '%s
' self.subreport_wrap = '

%s

\n' self.report_line = '%s%s\n' ## # Line-matching routines # def postfix_smtpd(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_postfix_id(msg) self.logger.put(5, 'id=%s' % id) try: client = self.postfix_smtpd_re.search(msg).group(1) client = self.gethost(client) except: client = None self.logger.put(5, 'client=%s' % client) restuple = self._mk_restuple(sys, id, client=client) return {restuple: mult} def postfix_qmgr(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_postfix_id(msg) self.logger.put(5, 'id=%s' % id) try: sender, size = self.postfix_qmgr_re.search(msg).groups() except: sender, size = (None, 0) size = int(size) self.logger.put(5, 'sender=%s, size=%d' % (sender, size)) restuple = self._mk_restuple(sys, id, sender=sender, size=size) return {restuple: mult} def postfix_local(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_postfix_id(msg) self.logger.put(5, 'id=%s' % id) try: to, status, comment = self.postfix_local_re.search(msg).groups() except: self.logger.put(5, 'Odd postfix/local line: %s' % msg) return None self.logger.put(5, 'to=%s, status=%s, comment=%s' % (to, status, comment)) if status == 'sent': status = self.success elif status == 'bounced': status = self.bounce else: status = self.warning if self.procmail_re.search(comment): extra = (self.procmail, 1) else: extra = None restuple = self._mk_restuple(sys, id, to=to, status=status, extra=extra) return {restuple: mult} def postfix_smtp(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_postfix_id(msg) self.logger.put(5, 'id=%s' % id) try: to, status = self.postfix_smtp_re.search(msg).groups() except: self.logger.put(5, 'Odd postfix/smtp line: %s' % msg) return None self.logger.put(5, 'to=%s, status=%s' % (to, status)) if status == 'sent': status = self.success elif status == 'bounced': status = self.bounce else: status = self.warning restuple = self._mk_restuple(sys, id, to=to, status=status) return {restuple: mult} def sendmail(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_sendmail_id(msg) mo = self.sendmail_fromline_re.search(msg) if mo: sender, size, client = mo.groups() sender = self._fix_sendmail_address(sender) size = int(size) client = self._fix_sendmail_relay(client) restuple = self._mk_restuple(sys, id, client=client, sender=sender, size=size) return {restuple: mult} mo = self.sendmail_ctladdr_re.search(msg) if mo: command, to, status = mo.groups() extra = None if self.procmail_re.search(command): extra = (self.procmail, 1) to = self._fix_sendmail_address(to) if status == 'Sent': status = self.success elif status == 'Deferred': status = self.warning restuple = self._mk_restuple(sys, id, to=to, status=status, extra=extra) return {restuple: mult} mo = self.sendmail_toline_re.search(msg) if mo: to, status = mo.groups() to = self._fix_sendmail_address(to) if status == 'Sent': status = self.success elif status == 'Deferred': status = self.warning restuple = self._mk_restuple(sys, id, to=to, status=status) return {restuple: mult} return None def qmail_infomsg(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_qmail_id(msg) try: size, sender = self.qmail_infoline_re.search(msg).groups() except: size = 0 sender = 'unknown' restuple = self._mk_restuple(sys, id, sender=sender, size=int(size)) return {restuple: mult} def qmail_startdev(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_qmail_id(msg) delid = self._get_qmail_delid(msg) try: to = 
self.qmail_startdev_re.search(msg).group(1) except: to = 'unknown' extra = (self.delidref, delid) restuple = self._mk_restuple(sys, id, to=to, extra=extra) return {restuple: mult} def qmail_delivery(self, linemap): sys, msg, mult = self.get_smm(linemap) delid = self._get_qmail_delid(msg) try: status = self.qmail_delivery_re.search(msg).group(1) if status == 'success': status = self.success else: status = self.warning except: status = self.warning extra = (self.delidid, 1) restuple = self._mk_restuple(sys, delid, status=status, extra=extra) return {restuple: mult} def qmail_bounce(self, linemap): sys, msg, mult = self.get_smm(linemap) id = self._get_qmail_id(msg) restuple = self._mk_restuple(sys, id, status=self.bounce) return {restuple: mult} ## # HElpers # def _mk_restuple(self, sys, id, client=None, sender=None, to=None, size=0, status=None, extra=None): return (sys, id, client, sender, to, size, status, extra) def _get_postfix_id(self, str): try: id = self.postfix_ident_re.search(str).group(1) except: id = 'unknown' return id def _get_sendmail_id(self, str): try: id = self.sendmail_ident_re.search(str).group(1) except: id = 'unknown' return id def _get_qmail_id(self, str): try: id = self.qmail_ident_re.search(str).group(1) except: id = 'unknown' return id def _get_qmail_delid(self, str): try: id = self.qmail_delid_re.search(str).group(1) except: id = 'unknown' return id def _fix_address(self, address): if address == '<>': address = '' address = self.htmlsafe(address) return address def _fix_sendmail_relay(self, str): try: host, ip = self.sendmail_relay_re.search(str).groups() str = self.gethost(ip) except: pass return str def _fix_sendmail_address(self, str): try: str = self.sendmail_from_re.search(str).group(1) except: str = '<%s>' % str return str def _get_top_report(self, rs, descr): toprep = self.subreport_wrap % (descr % self.toplim) toplist = rs.get_top(self.toplim) for count, member in toplist: key = self._fix_address(member[0]) toprep += self.report_line % (str(count), key) return toprep def finalize(self, rs): ## # Go through the results and make sense out of them # msgdict = {} ## # The problem with qmail is that it logs things inconsistently. # Well, at least not consistently with how epylog expects things # to be. These are hacks to make it work with qmail. # delids = {} delivs = {} while 1: try: msgtup, mult = rs.popitem() except: break extra = None system, id, client, sender, rcpt, size, status, extralst = msgtup if system is None or (id is None or id is 'unknown'): continue ## # Accommodate qmail hacks (except procmail, that's for everyone) # if extralst is not None: if extralst[0] == self.procmail: extra = self.procmail elif extralst[0] == self.delidref: delids[extralst[1]] = (system, id) elif extralst[0] == self.delidid: delivs[id] = status continue key = (system, id) try: msglist = msgdict[key] except KeyError: msglist = [[], [], [], [], [], []] if client is not None: msglist[0].append(client) if sender is not None: msglist[1].append(sender) if rcpt is not None: msglist[2].append(rcpt) if size is not None: msglist[3].append(size) if status is not None: msglist[4].append(status) if extra is not None: msglist[5].append(extra) msgdict[key] = msglist ## # More qmail hacks. # if delids: while 1: try: delid, key = delids.popitem() except: break if key in msgdict: if delid in delivs: msgdict[key][4].append(delivs[delid]) ## # Do some real calculations now that we have the results collapsed. 
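# At this point msgdict maps a (system, id) key, where id is the MTA queue id,
# to six parallel lists -- [clients, senders, rcpts, sizes, stati, extras] --
# gathered from the per-line results above. One entry might look roughly like
# this (illustrative values only):
#
#   msgdict[('mailhost', '7A2B3C1D9E')] = [['relay.example.com'],
#       ['<a@example.com>'], ['<b@example.com>'], [1024], [1], []]
#
# The loop below flattens these lists into per-system, per-sender and
# per-recipient counters plus the totals reported further down.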
# yrs = Result() # Systems crs = Result() # Clients (Connecting Relays) srs = Result() # Senders rrs = Result() # Recipients totalmsgs = 0 totalsize = 0 warnings = 0 successes = 0 bounces = 0 procmailed = 0 while 1: try: key, val = msgdict.popitem() except: break system, id = key yrs.add_result({(system,): 1}) totalmsgs += 1 clients, senders, rcpts, sizes, stati, extras = val for client in clients: crs.add_result({(client,): 1}) for sender in senders: srs.add_result({(sender,): 1}) for rcpt in rcpts: rrs.add_result({(rcpt,): 1}) for size in sizes: totalsize += size for status in stati: if status == self.warning: warnings += 1 elif status == self.success: successes += 1 elif status == self.bounce: bounces += 1 for extra in extras: if extra == self.procmail: procmailed += 1 rep = self.subreport_wrap % 'General Mail Report' rep += self.report_line % (totalmsgs, 'Total Messages Processed') rep += self.report_line % (successes, 'Total Successful Deliveries') rep += self.report_line % (warnings, 'Total Warnings Issued') rep += self.report_line % (bounces, 'Total Bounced Messages') if procmailed: rep += self.report_line % (procmailed, 'Processed by Procmail') size, unit = self.mk_size_unit(totalsize) rep += self.report_line % ('%d %s' % (size, unit), 'Total Transferred Size') if yrs: rep += self._get_top_report(yrs, 'Top %d active systems') if crs: rep += self._get_top_report(crs, 'Top %d connecting hosts') if srs: rep += self._get_top_report(srs, 'Top %d senders') if rrs: rep += self._get_top_report(rrs, 'Top %d recipients') report = self.report_wrap % rep return report if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(mail_mod, sys.argv) epylog/modules/weeder_mod.py0000644000175000017500000000733512527655413015523 0ustar tiagotiago#!/usr/bin/python -tt """ Description will eventually go here. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys import re ## # This is for testing purposes, so you can invoke this from the # modules directory. See also the testing notes at the end of the # file. 
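## The weed files named by the weed_dist/weed_local options below are plain
## text: [section] headers followed by one regex per line, with '#' comments
## and blank lines ignored. Two section names are special -- regexes listed
## under [REMOVE] are dropped from whichever section defined them, and an
## [ADD] section is enabled in addition to whatever the 'enable' option
## selects. For illustration, a local override could look like this (the
## regexes are made up):
##
##   [ADD]
##   ntpd\[\d+\]: synchronized to
##
##   [REMOVE]
##   a regex string copied verbatim from weed_dist.cf
##
## Every surviving regex is compiled and mapped to do_weed(), which only
## counts the matching lines so the total can be reported as "weeded".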
# sys.path.insert(0, '../py/') from epylog import InternalModule class weeder_mod(InternalModule): def __init__(self, opts, logger): InternalModule.__init__(self) self.logger = logger rc = re.compile weed_dist = opts.get('weed_dist', '/etc/epylog/weed_dist.cf') weed_local = opts.get('weed_local', '/etc/epylog/weed.local.cf') weed_dist = weed_dist.strip() weed_local = weed_local.strip() logger.put(5, 'weed_dist=%s' % weed_dist) logger.put(5, 'weed_local=%s' % weed_local) weed = {} self.regex_map = {} self.section_re = rc('^\s*\[(.*)\]\s*$') self.comment_re = rc('^\s*#') self.empty_re = rc('^\s*$') for weedfile in [weed_dist, weed_local]: try: weed = self._read_weed(open(weedfile), weed) except: logger.put(5, 'Error reading %s' % weedfile) if not weed: return if 'REMOVE' in weed: removes = weed['REMOVE'] del weed['REMOVE'] for remove in removes: for key in weed.keys(): if remove in weed[key]: regexes = weed[key] weed[key] = [] for regex in regexes: if regex != remove: weed[key].append(regex) enable = opts.get('enable', 'ALL').split(',') if 'ADD' in weed: enable.append('ADD') if enable[0] == 'ALL': enable = weed.keys() for key in enable: key = key.strip() regexes = weed.get(key, []) for regex in regexes: try: regex_re = rc(regex) except: logger.put(5, 'Error compiling regex "%s"' % regex) continue self.regex_map[regex_re] = self.do_weed def _read_weed(self, fh, weed): section = 'default' while 1: line = fh.readline() if not line: break if self.comment_re.search(line): continue if self.empty_re.search(line): continue mo = self.section_re.search(line) if mo: section = mo.group(1) else: try: weed[section].append(line.strip()) except KeyError: weed[section] = [line.strip()] return weed ## # Line-matching routines # def do_weed(self, linemap): return {1: linemap['multiplier']} def finalize(self, rs): report = '

Total messages weeded: %d

' % rs[1] return report if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(weeder_mod, sys.argv) epylog/modules/notices_mod.py0000644000175000017500000001441612527655413015712 0ustar tiagotiago#!/usr/bin/python -tt """ Description will eventually go here. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys import re import libxml2 import os sys.path.insert(0, '../py/') from epylog import InternalModule class notices_mod(InternalModule): def __init__(self, opts, logger): InternalModule.__init__(self) self.logger = logger self.critical = 1 self.normal = 0 self.regex_map = {} self.regex_dict = {} n_dist = opts.get('notice_dist', '/etc/epylog/notice_dist.xml') n_loc = opts.get('notice_local', '/etc/epylog/notice_local.xml') enables = opts.get('enable', 'ALL') if not enables: return enlist = [] for en in enables.split(','): enlist.append(en.strip()) notice_dict = self._parse_notices(n_dist, n_loc, enlist) if not notice_dict: return self._digest_notice_dict(notice_dict) self.ip_re = re.compile('\d+.\d+.\d+.\d+') self.report_wrap = '%s
\n' self.subreport_wrap = '

%s

\n' self.critical_title = 'CRITICAL Notices' self.normal_title = 'General Notices' self.report_line = '%s%s\n' ## # Line matching routines # def handle_notice(self, linemap): sys, msg, mult = self.get_smm(linemap) regex = linemap['regex'] crit, report = self.regex_dict[regex] mo = regex.search(msg) groups = mo.groups() if groups: groups = self._resolver(groups) try: report = report % groups except: pass return {(crit, sys, report): mult} ## # Helper methods # def _resolver(self, groups): ret = [] for member in groups: if self.ip_re.search(member): member = self.gethost(member) ret.append(member) return tuple(ret) def _parse_notices(self, dist, loc, enlist): logger = self.logger notice_dict = {} try: doc = libxml2.parseFile(dist) temp_dict = self._get_notice_dict(doc) if enlist[0] == 'ALL': notice_dict = temp_dict else: for en in enlist: if en in temp_dict: notice_dict[en] = temp_dict[en] doc.freeDoc() except Exception, e: logger.put(0, 'Could not read/parse notices file %s: %s' % (dist, e)) return if os.access(loc, os.R_OK): try: doc = libxml2.parseFile(loc) local_dict = self._get_notice_dict(doc) if local_dict: notice_dict.update(local_dict) doc.freeDoc() except Exception, e: logger.put(0, 'Exception while parsing %s: %s' % (loc, e)) pass return notice_dict def _digest_notice_dict(self, notice_dict): for regexes, crit, report in notice_dict.values(): for regex in regexes: self.regex_dict[regex] = (crit, report) self.regex_map[regex] = self.handle_notice def _get_notice_dict(self, doc): logger = self.logger notice_dict = {} root = doc.getRootElement() node = root.children while node: if node.name == 'notice': crit = self.normal props = node.properties while props: if props.name == 'id': id = props.content elif props.name == 'critical': if props.content.lower() == 'yes': crit = self.critical props = props.next regexes = [] kid = node.children while kid: if kid.name == 'regex': try: regex = re.compile(kid.content) regexes.append(regex) except: logger.put(0, 'Bad regex for "%s": %s' % (id, kid.content)) elif kid.name == 'report': report = kid.content kid = kid.next notice_dict[id] = (regexes, crit, report) node = node.next return notice_dict ## # FINALIZE! # def finalize(self, rs): report = '' reports = {} for urg in [self.critical, self.normal]: reports[urg] = '' for system in rs.get_distinct((urg,)): mymap = rs.get_submap((urg, system,)) messages = [] for message in mymap.keys(): messages.append('%s(%d)' % (message[0], mymap[message])) reports[urg] += self.report_line % (system, '
'.join(messages)) if reports[self.critical]: report += self.subreport_wrap % self.critical_title report += reports[self.critical] if reports[self.normal]: report += self.subreport_wrap % self.normal_title report += reports[self.normal] report = self.report_wrap % report return report if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(notices_mod, sys.argv) epylog/modules/packets_mod.py0000644000175000017500000002753312527660252015701 0ustar tiagotiago#!/usr/bin/python -tt """ Description will eventually go here. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys import re ## # This is for testing purposes, so you can invoke this from the # modules directory. See also the testing notes at the end of the # file. # sys.path.insert(0, '../py/') from epylog import Result, InternalModule class packets_mod(InternalModule): def __init__(self, opts, logger): InternalModule.__init__(self) self.logger = logger rc = re.compile iptables_map = { rc('IN=\S*\sOUT=\S*\s(MAC=\S*\s)?SRC=\S*\sDST=\S*\s'): self.iptables } ipchains_map = { rc('Packet\slog:\s.*PROTO.*'): self.ipchains } ipfilter_map = { rc('ipmon\[\d+\]:'): self.ipfilter } self.regex_map = {} if opts.get('enable_iptables', '1') == '1': self.regex_map.update(iptables_map) if opts.get('enable_ipchains', '0') == '1': self.regex_map.update(ipchains_map) if opts.get('enable_ipfilter', '0') == '1': self.regex_map.update(ipfilter_map) self.sortby = opts.get('sortby', 'packets') self.comment_line_re = rc('^\s*#') self.empty_line_re = rc('^\s*$') self.iptables_logtype_re = rc(':\s.*?(\S+?):*\sIN=') self.iptables_re = rc('SRC=(\S*)\s.*PROTO=(\S*)\s.*DPT=(\S*)') self.ipchains_re = rc('\slog:\s\S+\s(\S*).*\sPROTO=(\d+)\s(\S*):\d*\s\S*:(\d+)') self.ipfilter_re = rc('ipmon\[\d+\]:.*\s(\S+),\d+\s->\s\S+,(\d+)\sPR\s(\S+)') self.etc_services_re = rc('^(\S*)\s+(\S*)') self.trojan_list_re = rc('^(\S*)\s+(.*)') self.etc_protocols_re = rc('^(\S*)\s+(\S*)') svcdict = self._parse_etc_services() trojans = opts.get('trojan_list', '') self.systems_collapse = int(opts.get('systems_collapse', '10')) self.ports_collapse = int(opts.get('ports_collapse', '10')) self.trojan_warning_wrap = '%s' if trojans: svcdict = self._parse_trojan_list(trojans, svcdict) self.svcdict = svcdict self.protodict = self._parse_etc_protocols() self.collapsed_ports_rep = '[%d ports]' self.collapsed_hosts_rep = '[%d hosts]' self.report_wrap = '%s
' self.subreport_wrap = '

%s

\n%s\n' self.line_rep = '%d%s%s%s%s\n' self.flip = ' bgcolor="#dddddd"' def _parse_etc_protocols(self): try: fh = open('/etc/protocols', 'r') except: self.logger.put(0, 'Could not open /etc/protocols for reading!') return {} protodict = {} while 1: line = fh.readline() if not line: break if (self.comment_line_re.search(line) or self.empty_line_re.search(line)): continue try: proto, num = self.etc_protocols_re.search(line).groups() except: continue protodict[num] = proto return protodict def _parse_etc_services(self): try: fh = open('/etc/services', 'r') except: self.logger.put(0, 'Could not open /etc/services for reading!') return {} svcdict = {} while 1: line = fh.readline() if not line: break if (self.comment_line_re.search(line) or self.empty_line_re.search(line)): continue try: service, pproto = self.etc_services_re.search(line).groups() except: continue svcdict[pproto] = service return svcdict def _parse_trojan_list(self, fileloc, svcdict): try: fh = open(fileloc, 'r') except: self.logger.put(0, 'Could not open %s for reading!' % fileloc) return svcdict while 1: line = fh.readline() if not line: break if (self.comment_line_re.search(line) or self.empty_line_re.search(line)): continue try: pproto, trojan = self.trojan_list_re.search(line).groups() except: continue if pproto not in svcdict: svcdict[pproto] = self.trojan_warning_wrap % trojan return svcdict ## # Line-matching routines # def iptables(self, linemap): sys, msg, mult = self.get_smm(linemap) ## # See if it's prepended with a logtype string of sorts. # try: logtype = self.iptables_logtype_re.search(msg).group(1) except: logtype = 'LOGGED' try: src, proto, dpt = self.iptables_re.search(msg).groups() except: self.logger.put(3, 'Unknown iptables entry: %s' % msg) return None source = self.gethost(src) dpt = int(dpt) proto = proto.lower() return {(source, sys, dpt, proto, logtype): mult} def ipchains(self, linemap): sys, msg, mult = self.get_smm(linemap) try: logtype, proto, src, dpt = self.ipchains_re.search(msg).groups() except: self.logger.put(3, 'Unknown ipchains entry: %s' % msg) return None source = self.gethost(src) dpt = int(dpt) proto = self.protodict.get(proto, '??') return {(source, sys, dpt, proto, logtype): mult} def ipfilter(self, linemap): sys, msg, mult = self.get_smm(linemap) try: src, dpt, proto = self.ipfilter_re.search(msg).groups() except: self.logger.put(3, 'Unknown ipfilter entry: %s' % msg) return None source = self.gethost(src) dpt = int(dpt) proto = proto.lower() return {(source, sys, dpt, proto, 'LOGGED'): mult} def _mk_port(self, port): try: desc = '%s (%s)' % (self.svcdict[port], port) except KeyError: desc = port return desc def _addfin(self, fin, packets, source, system, port, logtype): if self.sortby == 'source': fin.append((source, packets, system, port, logtype)) elif self.sortby == 'system': fin.append((system, packets, source, port, logtype)) elif self.sortby == 'port': fin.append((port, packets, source, system, logtype)) else: fin.append((packets, source, system, port, logtype)) ## # Finalize! 
# def finalize(self, rs): logger = self.logger fin = [] for source in rs.get_distinct(()): dstrs = Result(rs.get_submap((source,))) systems = dstrs.get_distinct(()) if len(systems) >= self.systems_collapse: ## # result will look like so: # 655 | source | [ 25 systems ] | [ 2 ] | [ 2 ports ] | lst # or # 655 | source | [ 25 systems ] | DROP | 22/tcp | ssh # ports = [] logtypes = [] packets = 0 for system in systems: submap = dstrs.get_submap((system,)) while 1: try: entry, mult = submap.popitem() except KeyError: break dpt, proto, logtype = entry if (dpt, proto) not in ports: ports.append((dpt, proto)) if logtype not in logtypes: logtypes.append(logtype) packets += mult if len(ports) > 1: port = (-1, len(ports)) else: port = ports[0] if len(logtypes) > 1: logtype = '[%d]' % len(logtypes) else: logtype = logtypes[0] system = self.collapsed_hosts_rep % len(systems) self._addfin(fin, packets, source, system, port, logtype) else: for system in systems: logger.put(2, 'Processing system %s' % system) dpts = dstrs.get_distinct((system,)) if len(dpts) > self.ports_collapse: ## # Result will look like so: # 655 | source | system | DROP | [ 5 ports ] | lst # logtypes = [] packets = 0 sysrs = Result(dstrs.get_submap((system,))) portmap = dstrs.get_submap((system,)) while 1: try: entry, mult = portmap.popitem() except KeyError: break dpt, proto, logtype = entry logger.put(2, 'Processing port %s' % dpt) if logtype not in logtypes: logtypes.append(logtype) packets += mult if len(logtypes) > 1: logtype = '[%d]' % len(logtypes) else: logtype = logtypes[0] port = (-1, len(dpts)) self._addfin(fin, packets, source, system, port, logtype) else: for dpt in dpts: submap = dstrs.get_submap((system, dpt)) while 1: try: entry, packets = submap.popitem() except KeyError: break proto, logtype = entry port = (dpt, proto) self._addfin(fin, packets, source, system, port, logtype) report = '' flipper = '' fin.sort() if self.sortby == 'packets': fin.reverse() for entry in fin: if flipper: flipper = '' else: flipper = self.flip if self.sortby == 'source': source, packets, system, port, logtype = entry elif self.sortby == 'system': system, packets, source, port, logtype = entry elif self.sortby == 'port': port, packets, source, system, logtype = entry else: packets, source, system, port, logtype = entry if port[0] == -1: port = self.collapsed_ports_rep % port[1] else: port = self._mk_port("%s/%s" % port) report += self.line_rep % (flipper, packets, source, system, logtype, port) report = self.subreport_wrap % ('Firewall Violations', report) report = self.report_wrap % report return report if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(packets_mod, sys.argv) epylog/modules/logins_mod.py0000644000175000017500000007101612527655413015540 0ustar tiagotiago#!/usr/bin/python -tt """ Description will eventually go here. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. 
# # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys import re sys.path.insert(0, '../py/') from epylog import Result, InternalModule class logins_mod(InternalModule): def __init__(self, opts, logger): InternalModule.__init__(self) self.logger = logger self.opts = opts rc = re.compile self.ignore = 0 self.open = 1 self.failure = 2 self.root_open = 11 self.root_failure = 12 ## # Ignore crond(pam_unix), to lessen the noise # self.pam_ignore = ['crond'] self.xinetd_ignore = [] ## # PAM reports # pam_map = { rc('\(pam_unix\)\S*:.*authentication\s*failure'): self.pam_failure, rc('\(pam_unix\)\S*:\ssession\sopened\sfor'): self.pam_open, rc('\(pam_unix\)\S*:\sbad\susername'): self.pam_baduser, rc('\(pam_unix\)\S*:\sauth\scould\snot'): self.pam_chelper_failure, rc('pam_krb5\S*:\s\S+\ssucceeds\sfor'): self.pam_krb5_open, rc('pam_krb5\S*:\s\S+\sfails\sfor'): self.pam_krb5_failure } ## # XINETD reports # xinetd_map = { rc('xinetd\S*: START:'): self.xinetd_start } ## # SSH reports # sshd_map = { rc('sshd\[\S*: Accepted'): self.sshd_open, rc('sshd\[\S*: Failed'): self.sshd_failure } ## # IMAPD and IPOP3D # uw_imap_map = { rc('imapd\[\S*: Login\sfail'): self.uw_imap_failure, rc('imapd\[\S*: Authenticated\suser'): self.uw_imap_open, rc('imapd\[\S*: Login\suser'): self.uw_imap_open, rc('ipop3d\[\S*: Login\sfail'): self.uw_imap_failure, rc('ipop3d\[\S*: Login\suser'): self.uw_imap_open, rc('ipop3d\[\S*: Auth\suser'): self.uw_imap_open } ## # IMP # imp_map = { rc('IMP\[\S*: Login'): self.imp2_open, rc('IMP\[\S*: FAILED'): self.imp2_failure, rc('HORDE\[\S*\s*\[imp\] Login'): self.imp3_open, rc('HORDE\[\S*\s*\[imp\] FAILED'): self.imp3_failure } ## # DOVECOT # dovecot_map = { rc('imap-login:\sLogin:\s'): self.dovecot_open, rc('imap-login:\sAborted\slogin\s'): self.dovecot_failure } ## # Courier-IMAP # courier_map = { rc('\sLOGIN,\suser=\S+,\sip=\[\S+\]'): self.courier_open, rc('\sLOGIN FAILED,\sip=\[\S+\]'): self.courier_failure } ## # Cyrus-IMAP # cyrus_map = { rc('imapd\[\S*: login:'): self.cyrus_open, rc('pop3d\[\S*: login:'): self.cyrus_open, rc('imapd\[\S*: badlogin:'): self.cyrus_failure, rc('pop3d\[\S*: badlogin:'): self.cyrus_failure } ## # Qpopper # qpopper_map = { rc('apop\[\S*:\s\S+\sat\s.*\s\(\S*\):\s-ERR\s\[AUTH\]'): self.qpopper_failure, rc('apop\[\S*:\s\S+\sat\s.*\s\(\S*\):\s-ERR\s\[IN-USE\]'): self.qpopper_failure, rc('apop\[\S*:\s\(\S*\)\sPOP\slogin'): self.qpopper_open } ## # ProFTPD # proftpd_map = { rc('proftpd\[\S*:.*USER.*Login successful'): self.proftpd_open, rc('proftpd\[\S*:.*no such user found'): self.proftpd_failure, rc('proftpd\[\S*:.*Login failed'): self.proftpd_failure } ## # Systemd-Logind # systemd_map = { rc('systemd-logind\[\S*: New user \S+ logged in'): self.systemd_open, rc('systemd-logind\[\S*: New session \d+ of user \S'): self.systemd_open } regex_map = {} if opts.get('enable_pam', "1") != "0": regex_map.update(pam_map) if opts.get('enable_xinetd', "1") != "0": regex_map.update(xinetd_map) if opts.get('enable_sshd', "1") != "0": regex_map.update(sshd_map) self.pam_ignore.append('sshd') if opts.get('enable_uw_imap', "0") != "0": regex_map.update(uw_imap_map) self.xinetd_ignore.append('imaps') if opts.get('enable_imp', "0") != "0": regex_map.update(imp_map) if opts.get('enable_dovecot',"0") != "0": regex_map.update(dovecot_map) if 
opts.get('enable_courier',"0") != "0": regex_map.update(courier_map) if opts.get('enable_cyrus', "0") != "0": regex_map.update(cyrus_map) if opts.get('enable_qpopper',"0") != "0": regex_map.update(qpopper_map) if opts.get('enable_proftpd',"0") != "0": regex_map.update(proftpd_map) self.pam_ignore.append('ftp') self.xinetd_ignore.append('ftp') if opts.get('enable_systemd', "1") != "0": regex_map.update(systemd_map) self.safe_domains = [] safe_domains = opts.get('safe_domains', '.*') self.systems_collapse = int(opts.get('systems_collapse', '10')) for domain in safe_domains.split(','): domain = domain.strip() if domain: try: domain_re = rc(domain) self.safe_domains.append(domain_re) except: logger.put(0, 'Error compiling domain regex: %s' % domain) logger.put(0, 'Check config for Logins module!') self.regex_map = regex_map self.pam_service_re = rc('(\S+)\(pam_unix\)') self.pam_failure_re = rc('.*\slogname=(\S*).*\srhost=(\S*)') self.pam_failure_user_re = rc('\suser=(\S*)') self.pam_open_re = rc('.*for user (\S+) by\s(\S*)\s*\(uid=(\S+)\)') self.pam_failure_more_re = rc('(\S+)\smore\sauthentication\sfailures') self.pam_baduser_re = rc('\sbad\susername\s\[(.*)\]') self.pam_chelper_re = rc('password\sfor\s\[(.*)\]') self.pam_krb5_re = rc("^(\S+?)\[*\d*\]*:\spam_krb5\S*:\sauth.*\sfor\s`(\S+)'") self.xinetd_start_re = rc('START:\s*(\S*)\s') self.sshd_open_ruser_re = rc('Accepted\s(\S*)\sfor\s(\S*)\sfrom\s(\S*)\sport\s\d*\sruser\s(\S*)\s*(\S*)') self.sshd_open_re = rc('Accepted\s(\S*)\sfor\s(\S*)\sfrom\s(\S*)\sport\s\d+\s*(\S*)') self.sshd_fail_re = rc('Failed\s(\S*)\sfor.*\s(\S+)\sfrom\s(\S*)\sport\s\d*\s*(\S*)') self.uw_imap_fail_re = rc('auth=(.*)\shost=.*\[(\S*)\]') self.uw_imap_open_re = rc('user=(.*)\shost=.*\[(\S*)\]') self.uw_imap_service_re = rc('^(\S*)\[\d*\]:') self.dovecot_open_re = rc('Login:\s(\S+)\s\[(\S+)\]') self.dovecot_failure_re = rc('Aborted\slogin\s\[(\S+)\]') self.courier_open_re = rc('^(\S+?):.*\suser=(\S+),\sip=\[(\S+)\]') self.courier_failure_re = rc('^(\S+?):.*,\sip=\[(\S+)\]') self.imp2_open_re = rc('Login\s(\S*)\sto\s(\S*):\S*\sas\s(\S*)') self.imp2_fail_re = rc('FAILED\s(\S*)\sto\s(\S*):\S*\sas\s(\S*)') self.imp3_open_re = rc('success\sfor\s(\S*)\s\[(\S*)\]\sto\s\{(\S*):') self.imp3_fail_re = rc('LOGIN\s(\S*)\sto\s(\S*):\S*\sas\s(\S*)') self.proftpd_open_re = rc('proftpd\[\S*:.*\[(\S+)\].*USER\s(.*):\sLogin\ssuccessful') self.proftpd_failure_re = rc('proftpd\[\S*:.*\[(\S+)\].*USER\s([^:\s]*)') self.qpopper_open_re = rc('user "(.*)" at \(.*\)\s(\S*)') self.qpopper_fail_re = rc(':\s(.*)\sat\s(\S*)') self.cyrus_open_re = rc('login:.*\[(\S*)\]\s(\S*)\s') self.cyrus_fail_re = rc('badlogin:.*\[(\S*)\]\s\S\s(\S*)\sSASL') self.cyrus_service_re = rc('^(\S*)\[\d*\]:') self.systemd_new_user_re = rc('New\suser\s(\S+)\slogged\sin\.') self.systemd_new_session_re = rc('New\ssession\s\d+\sof\suser\s(\S+)\.') self.sshd_methods = {'password': 'pw', 'publickey': 'pk', 'rhosts-rsa': 'rsa', 'rsa': 'rsa', 'hostbased': 'host', 'none': 'none'} self.report_wrap = '%s
' self.subreport_wrap = '

%s

\n%s\n' self.root_failures_title = 'ROOT FAILURES' self.root_logins_title = 'ROOT Logins' self.user_failures_title = 'User Failures' self.user_logins_title = 'User Logins' self.untrusted_host = '%(system)s::%(rhost)s' self.flip = ' bgcolor="#dddddd"' self.line_rep = '%s%s%s\n' self.collapsed_rep = '%s [%s more skipped]' ## # LINE MATCHING ROUTINES # def general_ignore(self, linemap): restuple = (self.ignore, None, None, None) return {restuple: 1} def pam_failure(self, linemap): action = self.failure self.logger.put(5, 'pam_failure invoked') system, message, mult = self.get_smm(linemap) service = self._get_pam_service(message) mo = self.pam_failure_re.search(message) if not mo: self.logger.put(3, 'Odd pam failure string: %s' % message) return None byuser, rhost = mo.groups() mo = self.pam_failure_user_re.search(message) if mo: user = mo.group(1) else: user = 'unknown' if ((service == 'xscreensaver' and user == 'root') or service == 'sshd' or service == 'imap'): ## # xscreensaver will always fail as root. # SSHD is better handled by sshd part itself. # Imap failures are caught by imap routines. # Ignore these. # result = self.general_ignore(linemap) return result mo = self.pam_failure_more_re.search(message) if mo: mult += int(mo.group(1)) restuple = self._mk_restuple(action, system, service, user, byuser, rhost) return {restuple: mult} def pam_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) service = self._get_pam_service(message) if service in self.pam_ignore: ## # the service will do a much better job. # result = self.general_ignore(linemap) return result mo = self.pam_open_re.search(message) if not mo: self.logger.put(3, 'Odd pam open string: %s' % message) return None user, byuser, byuid = mo.groups() if byuser == '': byuser = self.getuname(int(byuid)) restuple = self._mk_restuple(action, system, service, user, byuser, '') return {restuple: mult} def pam_baduser(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.pam_baduser_re.search(message) if not mo: self.logger.put(3, 'Odd pam bad user string: %s' % message) return None user = mo.group(1) service = self._get_pam_service(message) restuple = self._mk_restuple(action, system, service, user, '', '') return {restuple: mult} def pam_chelper_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.pam_chelper_re.search(message) if not mo: self.logger.put(3, 'Odd pam console helper string: %s' % message) return None user = mo.group(1) service = self._get_pam_service(message) restuple = self._mk_restuple(action, system, service, user, '', '') return {restuple: mult} def pam_krb5_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.pam_krb5_re.search(message) if not mo: self.logger.put(3, 'Odd pam_krb5 succeeds line: %s' % message) return None service = mo.group(1) user = mo.group(2) if service == 'sshd': ## # sshd_open will do a much better job. 
# result = self.general_ignore(linemap) return result restuple = self._mk_restuple(action, system, service, user, '', '') return {restuple: mult} def pam_krb5_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.pam_krb5_re.search(message) if not mo: self.logger.put(3, 'Odd pam_krb5 failure line: %s' % message) return None service = mo.group(1) user = mo.group(2) if ((service == 'xscreensaver' and user == 'root') or service == 'sshd' or service == 'imap'): ## # xscreensaver will always fail as root. # SSHD is better handled by sshd part itself. # Imap failures are caught by imap routines. # Ignore these. # result = self.general_ignore(linemap) return result restuple = self._mk_restuple(action, system, service, user, '', '') return {restuple: mult} def xinetd_start(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.xinetd_start_re.search(message) if not mo: self.logger.put(3, 'Odd xinetd start string: %s' % message) return None service = mo.group(1) if service in self.xinetd_ignore: ## # the service will do a much better job. # result = self.general_ignore(linemap) return result restuple = self._mk_restuple(action, system, service, '', '', '') return {restuple: mult} def sshd_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) ruser = '' mo1 = self.sshd_open_ruser_re.search(message) mo2 = self.sshd_open_re.search(message) if mo1: method, user, rhost, ruser, service = mo1.groups() elif mo2: method, user, rhost, service = mo2.groups() else: self.logger.put(3, 'Odd sshd open string: %s' % message) return None method = self.sshd_methods.get(method, '??') rhost = self.gethost(rhost) if not service: service = 'ssh1' service = '%s(%s)' % (service, method) restuple = self._mk_restuple(action, system, service, user, ruser, rhost) return {restuple: mult} def sshd_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.sshd_fail_re.search(message) if not mo: self.logger.put(3, 'Odd sshd FAILURE string: %s' % message) return None method, user, rhost, service = mo.groups() method = self.sshd_methods.get(method, '??') rhost = self.gethost(rhost) if not service: service = 'ssh1' service = '%s(%s)' % (service, method) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def uw_imap_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) service = self._get_uw_imap_service(message) service = '%s(uw)' % service mo = self.uw_imap_fail_re.search(message) if not mo: self.logger.put(3, 'Odd imap FAILURE string: %s' % message) return None user, rhost = mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def uw_imap_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) service = self._get_uw_imap_service(message) service = '%s(uw)' % service mo = self.uw_imap_open_re.search(message) if not mo: self.logger.put(3, 'Odd imap open string: %s' % message) return None user, rhost = mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def dovecot_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) service = 'imap(dc)' mo = self.dovecot_open_re.search(message) if not mo: self.logger.put(3, 'Odd dovecot OPEN string: %s' % message) return None user, rhost 
= mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def dovecot_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) service = 'imap(dc)' mo = self.dovecot_failure_re.search(message) if not mo: self.logger.put(3, 'Odd dovecot FAILURE string: %s' % message) return None rhost = mo.group(1) rhost = self.gethost(rhost) user = 'unknown' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def systemd_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.systemd_new_user_re.search(message) if not mo: mo = self.systemd_new_session_re.search(message) if not mo: self.logger.put(3, 'Odd systemd OPEN string: %s' % message) return None service = 'systemd-logind' user = mo.group(1) restuple = self._mk_restuple(action, system, service, user, '', 'localhost') return {restuple: mult} def courier_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.courier_open_re.search(message) if not mo: self.logger.put(3, 'Odd courier OPEN string: %s' % message) return None service, user, rhost = mo.groups() service = '%s(cr)' % service rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def courier_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.courier_failure_re.search(message) if not mo: self.logger.put(3, 'Odd courier FAILURE string: %s' % message) return None service, rhost = mo.groups() service = '%s(cr)' % service rhost = self.gethost(rhost) user = 'unknown' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def proftpd_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.proftpd_open_re.search(message) if not mo: self.logger.put(3, 'Odd ProFTPD OPEN string: %s' % message) return None service = 'ftp(pro)' rhost, user = mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def proftpd_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.proftpd_failure_re.search(message) if not mo: self.logger.put(3, 'Odd ProFTPD FAILURE string: %s' % message) return None service = 'ftp(pro)' rhost, user = mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def imp2_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.imp2_fail_re.search(message) if not mo: self.logger.put(3, 'Odd IMP failure string: %s' % message) return None rhost, system, user = mo.groups() rhost = self.gethost(rhost) service = 'IMP2' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def imp2_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.imp2_open_re.search(message) if not mo: self.logger.put(3, 'Odd IMP open string: %s' % message) return None rhost, system, user = mo.groups() rhost = self.gethost(rhost) service = 'IMP2' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def imp3_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.imp3_fail_re.search(message) if 
not mo: self.logger.put(3, 'Odd IMP failure string: %s' % message) return None rhost, system, user = mo.groups() rhost = self.gethost(rhost) service = 'IMP3' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def imp3_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.imp3_open_re.search(message) if not mo: self.logger.put(3, 'Odd IMP open string: %s' % message) return None user, rhost, system = mo.groups() rhost = self.gethost(rhost) service = 'IMP3' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def cyrus_failure(self,linemap): action = self.failure system, message, mult = self.get_smm(linemap) service = self._get_cyrus_service(message) mo = self.cyrus_fail_re.search(message) if not mo: self.logger.put(3, 'Odd cyrus FAILURE string: %s' % message) return None rhost, user = mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def cyrus_open(self,linemap): action = self.open system, message, mult = self.get_smm(linemap) service = self._get_cyrus_service(message) mo = self.cyrus_open_re.search(message) if not mo: self.logger.put(3, 'Odd cyrus open string: %s' % message) return None rhost, user = mo.groups() rhost = self.gethost(rhost) restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def qpopper_failure(self, linemap): action = self.failure system, message, mult = self.get_smm(linemap) mo = self.qpopper_fail_re.search(message) if not mo: self.logger.put(3, 'Odd qpopper FAILURE string: %s' % message) return None user, rhost = mo.groups() rhost = self.gethost(rhost) service = 'qpopper' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} def qpopper_open(self, linemap): action = self.open system, message, mult = self.get_smm(linemap) mo = self.qpopper_open_re.search(message) if not mo: self.logger.put(3, 'Odd qpopper open string: %s' % message) return None user, rhost = mo.groups() rhost = self.gethost(rhost) service = 'qpopper' restuple = self._mk_restuple(action, system, service, user, '', rhost) return {restuple: mult} ## # HELPER METHODS # def _mk_restuple(self, action, system, service, user, byuser, rhost): if user == '': user = 'unknown' if user == 'root' or user == 'ROOT': action += 10 remote = self._mk_userat(byuser, rhost) restuple = (action, system, service, remote) else: if rhost: match = 0 for domain_re in self.safe_domains: if domain_re.search(rhost): match = 1 break if not match: tmp = {'system': system, 'rhost': rhost} system = self.untrusted_host % tmp restuple = (action, user, service, system) return restuple def _mk_dots(self, str, lim): if len(str) > lim: start = -(lim-2) str = '..' + str[start:] return str def _get_pam_service(self, str): service = 'unknown' mo = self.pam_service_re.search(str) if mo: service = mo.group(1) return service def _get_uw_imap_service(self, str): service = 'unknown' mo = self.uw_imap_service_re.search(str) if mo: service = mo.group(1) return service def _mk_userat(self, user, host): if user and host: userat = '%s@%s' % (user, host) elif user: userat = user elif host: userat = '@%s' % host else: userat = 'unknown' return userat def _get_cyrus_service(self, str): service = 'unknown' mo = self.cyrus_service_re.search(str) if mo: service = mo.group(1) return service ## # FINALIZE!! 
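    # finalize() receives the accumulated Result object ('rs') and renders
    # four subreports -- root failures, root logins, user failures and
    # user logins -- into the single HTML fragment that epylog places in
    # the final report.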
# def finalize(self, rs): logger = self.logger ## # Prepare report # report = '' rep = {} for action in [self.root_failure, self.root_open, self.failure, self.open]: rep[action] = '' flipper = '' for key in rs.get_distinct((action,)): if flipper: flipper = '' else: flipper = self.flip service_rep = [] for service in rs.get_distinct((action, key)): mymap = rs.get_submap((action, key, service)) key2s = [] for key2 in mymap.keys(): loghost = key2[0] key2s.append('%s(%d)' % (loghost, mymap[key2])) if len(key2s) > self.systems_collapse: loghosts = self.collapsed_rep % (key2s[0],len(key2s)-1) else: loghosts = ', '.join(key2s) service_rep.append([service, loghosts]) blank = 0 for svcrep in service_rep: if blank: key = ' ' else: blank = 1 rep[action] += self.line_rep % (flipper, key, svcrep[0], svcrep[1]) if rep[self.root_failure]: report += self.subreport_wrap % (self.root_failures_title, rep[self.root_failure]) if rep[self.root_open]: report += self.subreport_wrap % (self.root_logins_title, rep[self.root_open]) if rep[self.failure]: report += self.subreport_wrap % (self.user_failures_title, rep[self.failure]) if rep[self.open]: report += self.subreport_wrap % (self.user_logins_title, rep[self.open]) report = self.report_wrap % report return report if __name__ == '__main__': from epylog.helpers import ModuleTest ModuleTest(logins_mod, sys.argv) epylog/perl/0000755000175000017500000000000012527655413012321 5ustar tiagotiagoepylog/perl/epylog.pm0000644000175000017500000002502312527655413014160 0ustar tiagotiago#!/usr/bin/perl -w # epylog.pm # ---------------- # # Copyright (C) 2002 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # Michael Stenner # @version $Date$ # package epylog; ## # Strict enforces good coding practices by making you observe the # variable scope. # use strict; use Socket; ## # Create the object and bless it. # # @return the reference to the epylog object. # sub new { my $this = {}; $this->{known_uids} = undef; $this->{known_hosts} = undef; $this->{logfilter} = undef; $this->{filtsize} = undef; $this->{report} = undef; $this->{logcat} = undef; $this->{modname} = undef; $this->{headerline} = undef; $this->{loglevel} = undef; $this->{logeof} = undef; bless $this; return $this; } ## # Initialize our brand-new epylog object. # # @param $1 The name of the module using this object. Used to generate # log entries. # @return void # sub init { my $this = shift; my $modname = shift; my $headerline = "#\n# %s\n#"; $this->{known_uids} = {}; $this->{known_hosts} = {}; $this->{report} = []; $this->{filtsize} = 0; ## # Open the logfile, or STDIN if LOGCAT is undefined. 
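    # (When epylog invokes an external module it passes LOGCAT, LOGFILTER,
    # LOGREPORT and DEBUG -- plus QUIET when running quietly -- to it as
    # environment variables; see the external-module invocation code in
    # py/epylog/module.py.)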
# my $logcat = $this->option('LOGCAT', undef); if (defined($logcat)) { open(LOGCAT, $logcat) or die "cannot open input file $logcat"; } else { *LOGCAT = *STDIN; } $this->{logcat} = *LOGCAT; if (!eof(LOGCAT)){ $this->{logeof} = 0; } else { $this->{logeof} = 1; } ## # Open the file for processed strings, or write to STDERR if # LOGFILTER is not defined. # my $logfilter = $this->option('LOGFILTER', undef); if (defined($logfilter)){ open(LOGFILTER, ">$logfilter") or $this->mlog(0, "cannot open filtered strings file $logfilter"); } else { *LOGFILTER = *STDERR; } $this->{logfilter} = *LOGFILTER; $this->{modname} = $modname; $this->{headerline} = $headerline; $this->{loglevel} = 0; ## # By default, the loglevel is set to 1. QUIET sets it to 0. # DEBUG overrides QUIET and sets it to the value of DEBUG. # module_DEBUG (where module is replaced by the upper-case module # name) overrides DEBUG and sets loglevel to the value of # module_DEBUG. my $debug = $this->option('DEBUG', undef); my $md = uc($this->{modname}) . '_DEBUG'; my $module_debug = $this->option($md, undef); if (defined($debug) or defined($module_debug)) { if (defined($debug)) { $this->{loglevel} = $debug; } if (defined($module_debug)) { $this->{loglevel} = $module_debug; } } else { my $quiet = $this->option('QUIET', undef); if (!defined($quiet)){ $this->{loglevel} = 1; } } } ## # This sub takes a uid and looks up the user name. # # @param $1 the uid to look up. # @return the username. # sub getuname { my $this = shift; my $uid = shift; if (!defined($uid)){ return(undef); } if (exists($this->{known_uids}{$uid})){ return($this->{known_uids}{$uid}); } else { (my $uname) = getpwuid($uid); if (!defined($uname)){ $uname = "uid=$uid"; } $this->{known_uids}{$uid} = $uname; return $uname; } } ## # This sub tries to resolve hostnames if possible. If not, it returns # the ip address back. The %known_hosts hash is used to cache the values # for optimization. # # @param $1 The IP of a host to lookup. # @return The FQDN, or the IP address if lookup failed. # sub gethost { my $this = shift; my $host = shift; if (exists($this->{known_hosts}{$host})) { return($this->{known_hosts}{$host}); } else { ## # hash resolved names. This speeds things up because we often get # many hits from the same host. # my @host_a = gethostbyaddr(pack('CCCC', split(/\./, $host)), AF_INET); my $hostname = defined($host_a[0]) ? $host_a[0] : $host; $this->{known_hosts}{$host} = $hostname; return $hostname; } } ## # Since all syslog lines start uniformly, use this sub to # grab the name of the system from the log line. # # @param $1 The log line. # @return The name of the system this log line refers to. # sub getsystem { my $this = shift; my $line = shift; (my $system) = $line =~ m/.{15}\s(\S*)\s.*/; ## # syslog-ng can report hosts in a more complicated way :) # if ($system =~ m{[@/](\S+)}) { $system = $1; } return($system); } ## # A wrapper to process the options passed in by environment variables. # If the referred ENV variable is unset, then return the default value. # This behavior is useful for debugging the module. # # @param $1 The name of the ENV variable to grab. # @param $2 The default value to return if the ENV variable is unset. # @return The value of the environment variable, or the default value # if the variable is unset. # sub option { my $this = shift; my $op = shift; my $default = shift; return(exists($ENV{$op}) ? $ENV{$op} : $default); } ## # Fetch the next available line from the logfile (LOGCAT). 
If the end of # file is reached, it will set $this->{logeof} to 1. # # @return the next line available. # sub nextline { my $this = shift; my $logcat = $this->{logcat}; my $nextline = <$logcat>; chomp($nextline); if (eof($logcat)){ $this->{logeof} = 1; close($logcat); } return $nextline; } ## # This is used to test if we are at the end of the logfile. # # @return 1 if the end of file has been reached. # sub islogeof { my $this = shift; return $this->{logeof}; } ## # Add a string or an array of strings to the final module report. # # @param $1 an string or array of strings. # sub pushrep { my $this = shift; push(@{$this->{report}}, @_); } ## # Adds a syslog line or an array of syslog lines to the filtered strings # file. # # @param $1 a syslog line or an array of syslog lines. # sub pushfilt { my $this = shift; my $logfilter = $this->{logfilter}; my $filtline = join("\n", @_); print $logfilter "$filtline\n"; $this->{filtsize}++; } ## # Produce a debugging output. # # @param $1 the level # @param $2 a string or array of strings to output. # sub mlog { my $this = shift; my $level = shift; my $modname = $this->{modname}; if ($this->{loglevel} >= $level){ print STDOUT "$modname: ", @_, "\n"; } } ## # How many lines are currently in the filtered strings file? # # @return the number of syslog lines in LOGFILTER. # sub filtsize { my $this = shift; return $this->{filtsize}; } ## # How many lines are currently in the report? # # @return the number of lines in LOGREPORT. # sub repsize { my $this = shift; return $#{$this->{report}}; } ## # Make a pretty-looking and uniform report header. # # @param $1 a string with some descriptive title # @return a string with a formatted report title # sub mkrephdr { my $this = shift; my $msg = shift; my $hdr = sprintf($this->{headerline}, $msg); return $hdr; } ## # Closes any open filehandles and writes the report into LOGREPORT. # This must be called at the end of your module. # sub finalize { my $this = shift; my $title = shift; ## # Open output file, $LOGREPORT or write to STDOUT if LOGREPORT # isn't defined. # my $logreport = $this->option('LOGREPORT', undef); if (defined($logreport)) { open(LOGREPORT, ">$logreport") or $this->mlog(0, "cannot open output file $logreport"); } else { *LOGREPORT = *STDOUT; } if ($#{$this->{report}} >= 0){ print LOGREPORT join("\n", @{$this->{report}}) . "\n"; } if ($this->{logfilter} ne *STDERR){ close($this->{logfilter}); } if (*LOGREPORT ne *STDOUT){ close(LOGREPORT); } } 1; __END__ =head1 NAME epylog - Perl5 module for writing perl modules for epylog. =head1 SYNOPSIS use epylog; # create a new epylog object my $du = new epylog; # initialize the object $du->init('modulename'); # get a username from a userid $du->getuname(500); # get a hostname from an IP address $du->gethost('127.0.0.1'); # find the system name in a standard syslog line $du->getsystem($syslogline); # get the value of an environment variable # first parameter is the name of the variable, second one is # the default value to return if the variable is undefined. $du->option('TMPDIR', '/tmp'); # return the next available syslog line from the logs (LOGCAT) $du->nextline(); # check if the logfile is EOF'd. Returns 0 if not yet. $du->islogeof(); # add a string or an array of strings to the report (LOGREPORT) $du->pushrep('Report line'); # add a syslog line entry to the list of analyzed and filtered # lines (LOGFILTER) $du->pushfilt($syslog_line); # intelligently output some debug information. # first parameter is level, second parameter is the string to output. 
# level 0 -- critical errors, always output # level 1 -- standard epylog execution, without "--quiet" # level 2> -- additional levels of verbosity. $du->mlog(1, 'Processing data'); # return how many lines were added to the filter file (LOGFILTER) $du->filtsize(); # return how many lines were added to the report file (LOGREPORT) $du->repsize(); # make a pretty report header. $du->mkrephdr('NOTICED REBOOTS'); # call this at the end of your module! It closes the filehandles and # writes out the report. $du->finalize(); =head1 AUTHORS Konstantin Ryabitsev Michael Stenner Duke University Physics =head1 REVISION $Revision$ =head1 SEE ALSO epylog(8), epylog_modules(5) =cut epylog/perl/Makefile.in0000644000175000017500000000234112527655413014366 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ INSTALLDIRS = $(PERL_MODULE_DIR) $(mandir)/man3 all: perldoc epylog.pm > epylog.3 install: all installdirs $(INSTALL_DATA) epylog.pm $(DESTDIR)$(PERL_MODULE_DIR)/epylog.pm $(INSTALL_DATA) epylog.3 $(DESTDIR)$(mandir)/man3/epylog.3 uninstall: $(RM) $(PERL_MODULE_DIR)/epylog.pm $(RM) $(mandir)/man3/epylog.3 clean: $(RM) epylog.3 distclean: clean $(RM) Makefile installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/AUTHORS0000644000175000017500000000107512527655413012432 0ustar tiagotiagoepylog ----- Konstantin ("Icon") Ryabitsev -- icon@linux.duke.edu Modules -------- Konstantin ("Icon") Ryabitsev (icon@linux.duke.edu): - notices_mod.py - logins_mod.py - mail_mod.py - weeder_mod.py - spamd_mod.py - packets_mod.py Special thanks to: - Michael Stenner for many of the original modules, plus contributions in both code and ideas - Various contributors including, but not limited to: Will Newton, Sean Dilda, Sean O'Connell, Heather Sarik, Chris Geddings, Josko Plazonic, Seth Vidal epylog/README0000644000175000017500000000455012527655413012243 0ustar tiagotiagoEpylog ------ Epylog is a new log notifier and parser which runs periodically out of cron, looks at your logs, processes the entries in order to present them in a more comprehensive format, and then provides you with the output. It is written specifically with large network clusters in mind where a lot of machines (around 50 and upwards) log to the same loghost using syslog or syslog-ng. Alternatively, Epylog can be invoked from the command line and provide a log report based on a certain provided time period. In this case it relies on syslog timestamps to find the offsets, as opposed to the end-of-log offsets stored during the last run, though this behavior is not as reliable and is easily thwarted by skewed clocks. 
AUTHOR ------- Konstantin Ryabitsev OBTAINING ---------- http://linux.duke.edu/projects/epylog/ BUGS ------ Please file the bugs in the Epylog bugzilla: http://devel.linux.duke.edu/bugzilla/ MAILING LIST ------------- The mailing list for epylog-related questions is: epylog@linux.duke.edu. Please send all your inquires there. LICENSE -------- Copyright (C) 2001-2004 by Duke University This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; either version 2 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA 02111-1307, USA. This license does not include files in the "modules" directory. They are covered under different licenses (see further). MODULES -------- Modules are the parsing soul of epylog. For more info please see "man epylog-modules". If you wrote a module, you are encouraged to contribute it to the epylog so other people can make use of it. Please inquire on the list for more information. MODULES LICENSE ---------------- Modules are considered a separate entity and are licensed to you as per the licensing conditions mentioned individually within the source of each module. -- $Id$ epylog/py/0000755000175000017500000000000012527655413012007 5ustar tiagotiagoepylog/py/Makefile.in0000644000175000017500000000235312527655413014057 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PYTHON_BIN = @PYTHON_BIN@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ PYDIR = epylog INSTALLDIRS = $(PY_MODULE_DIR)/$(PYDIR) all: $(COMPILEDIR_SCRIPT) $(PYDIR) install: all installdirs for PYFILE in $(PYDIR)/*.py $(PYDIR)/*.pyc; do \ $(INSTALL_DATA) $$PYFILE $(DESTDIR)$(PY_MODULE_DIR)/$(PYDIR); \ done uninstall: $(RM) -rf $(INSTALLDIRS) clean: $(RM) $(PYDIR)/*.pyc $(PYDIR)/*.pyo distclean: clean $(RM) Makefile installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/py/epylog/0000755000175000017500000000000012527655413013306 5ustar tiagotiagoepylog/py/epylog/helpers.py0000644000175000017500000001260112527655413015322 0ustar tiagotiago""" This helper module is useful for writing and debugging Epylog modules. It provides several useful methods for running the modules standalone without having to invoke them as part of Epylog. 
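
A typical pattern, as seen at the bottom of the bundled modules, looks
roughly like this ('logins_mod' stands for whichever module class the file
defines; the file names below are only examples):

    if __name__ == '__main__':
        from epylog.helpers import ModuleTest
        ModuleTest(logins_mod, sys.argv)

The module can then be exercised standalone, e.g.:

    python logins_mod.py -i testcase.log -r report.html -f filtered.log

See ModuleTest._usage() for the full list of accepted options.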
""" ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import sys sys.path.insert(0, './py/') import epylog import getopt class ModuleTest: """ This class lets you debug and test your Epylog modules as standalone apps, not as part of Epylog invocation. This is quicker and just as effective. """ def __init__(self, epyclass, args): logger = epylog.Logger(5) cmdargs = args[1:] if not cmdargs: self._usage(args[0]) infile = None repfile = None filtfile = None opts = {} try: gopts, cmds = getopt.getopt(cmdargs, 'i:r:f:o:', []) for o, a in gopts: if o == '-i': infile = a if o == '-r': repfile = a if o == '-f': filtfile = a if o == '-o': pairs = a.split(';') for pair in pairs: pair.strip() key, value = pair.split('=') key = key.strip() value = value.strip() opts[key] = value except getopt.error, e: self._usage(args[0]) if opts: logger.put(5, 'Additional opts follow') logger.put(5, opts) logger.put(5, 'Instantiating the module') epymod = epyclass(opts, logger) if input is None: self._usage(args[0]) logger.put(5, 'Trying to open file %s for reading' % infile) try: infh = open(infile) except Exception, e: msg = "ERROR trying to open file %s: %s" % (infile, e) self._die(msg) if filtfile is not None: logger.put(5, 'Trying to open %s for writing' % filtfile) try: filtfh = open(filtfile, 'w') except Exception, e: msg = "ERROR trying to open file %s: %s" % (filtfile, e) self._die(msg) monthmap = epylog.log.mkmonthmap() rs = epylog.Result() while 1: line = infh.readline() if not line: break line = line.strip() linemap = self._mk_linemap(line, monthmap) msg = linemap['message'] for regex in epymod.regex_map.keys(): if regex.search(msg): handler = epymod.regex_map[regex] linemap['regex'] = regex logger.put(5, '%s -> %s' % (handler.__name__, msg)) result = handler(linemap) if result is not None: rs.add_result(result) if filtfile is not None: filtfh.write('%s\n' % line) break infh.close() if filtfile is not None: filtfh.close() if not rs.is_empty(): logger.put(5, 'Finalizing') report = epymod.finalize(rs) if repfile is not None: logger.put(5, 'Trying to write report to %s' % repfile) repfh = open(repfile, 'w') repfh.write(report) repfh.close() logger.put(5, 'Report written to %s' % repfile) else: logger.put(5, 'Report follows:') print report else: logger.put(5, 'No results for this run') logger.put(5, 'Done') def _mk_linemap(self, line, monthmap): """ Create a linemap out of a line entry. """ try: stamp, sys, msg = epylog.log.get_stamp_sys_msg(line, monthmap) except ValueError: msg = 'Invalid syslog line: %s' % line self._die(msg) linemap = {'line': line, 'stamp': stamp, 'system': sys, 'message': msg, 'multiplier': 1} return linemap def _die(self, message): """ Hot Grits Death! 
""" print 'FATAL ERROR: %s' % message sys.exit(1) def _usage(self, name): print '''Usage: %s -i testcase [-r report] [-f filter] [-o EXTRAOPTS] If -r is omitted, the report is printed to stdout If -f is omitted, filtered lines are not shown EXTRAOPTS: Extra options should be submitted in this matter: -o "option=value; option2=value; option3=value" ''' % name sys.exit(1) if __name__ == '__main__': print "See module documentation on how to use the helper" epylog/py/epylog/module.py0000644000175000017500000003767012527655413015162 0ustar tiagotiago""" This module handles the... er... modules for epylog, both internal and external. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import ConfigParser import epylog import os import tempfile import string import re if 'mkdtemp' not in dir(tempfile): ## # Must be python < 2.3 # del tempfile import mytempfile as tempfile from ihooks import BasicModuleLoader _loader = BasicModuleLoader() class Module: """epylog Module class""" def __init__(self, cfgfile, logtracker, tmpprefix, logger): self.logger = logger logger.put(5, '>Module.__init__') logger.put(3, 'Initializing module for cfgfile %s' % cfgfile) config = ConfigParser.ConfigParser() logger.put(3, 'Reading in the cfgfile %s' % cfgfile) config.read(cfgfile) try: self.name = config.get('module', 'desc') except: self.name = 'Unnamed Module' try: self.enabled = config.getboolean('module', 'enabled') except: self.enabled = 0 if not self.enabled: logger.put(3, 'This module is not enabled. 
Skipping init.') return try: self.executable = config.get('module', 'exec') except: msg = 'Did not find executable name in "%s"' % cfgfile raise epylog.ConfigError(msg, logger) try: self.internal = config.getboolean('module', 'internal') except: self.internal = 0 try: self.priority = config.getint('module', 'priority') except: self.priority = 10 try: logentries = config.get('module', 'files') except: msg = 'Cannot find log definitions in module config "%s"' % cfgfile raise epylog.ConfigError(msg, logger) try: self.outhtml = config.getboolean('module', 'outhtml') except: self.outhtml = 0 self.extraopts = {} if config.has_section('conf'): logger.put(3, 'Found extra options') for option in config.options('conf'): value = config.get('conf', option) logger.put(5, '%s=%s' % (option, value)) self.extraopts[option] = value logger.put(5, 'Done with extra options') modname = os.path.basename(self.executable) tempfile.tempdir = tmpprefix self.tmpprefix = tmpprefix self.logdump = tempfile.mktemp('%s.DUMP' % modname) self.logreport = tempfile.mktemp('%s.REPORT' % modname) self.logfilter = tempfile.mktemp('%s.FILTER' % modname) logger.put(5, 'name=%s' % self.name) logger.put(5, 'executable=%s' % self.executable) logger.put(5, 'enabled=%d' % self.enabled) logger.put(5, 'internal=%d' % self.internal) logger.put(5, 'priority=%d' % self.priority) logger.put(5, 'logentries=%s' % logentries) logger.put(5, 'outhtml=%d' % self.outhtml) logger.put(5, 'logdump=%s' % self.logdump) logger.put(5, 'logreport=%s' % self.logreport) logger.put(5, 'logfilter=%s' % self.logfilter) ## # Init internal modules # if self.internal: self._init_internal_module() logger.put(3, 'Figuring out the logfiles from the log list') entrylist = logentries.split(',') self.logs = [] for entry in entrylist: entry = entry.strip() logger.put(5, 'entry=%s' % entry) logger.put(3, 'Getting a log object from entry "%s"' % entry) try: log = logtracker.getlog(entry) except epylog.AccessError: ## # Do not die, but disable this module and complain loudly # logger.put(0, 'Could not init logfile for entry "%s"' % entry) continue except epylog.NoSuchLogError: ## # Looks like all logfiles for this log entry are empty. # Ignore this log entry. 
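                # (The module itself is only disabled further down, when
                # none of its configured logfiles could be initialized --
                # see the len(self.logs) check below.)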
logger.put(1, ('No logs found for %s, or they are all empty, '+ 'ignoring.') % entry) continue logger.put(5, 'Appending the log object to self.logs[]') self.logs.append(log) if len(self.logs) == 0: self.enabled = 0 logger.put(0, 'Module "%s" disabled' % self.name) return logger.put(5, 'Module._init_internal_module') dirname = os.path.dirname(self.executable) modname = os.path.basename(self.executable) modname = re.sub(re.compile('\.py'), '', modname) logger.puthang(3, 'Importing module "%s"' % modname) stuff = _loader.find_module_in_dir(modname, dirname) if stuff: try: module = _loader.load_module(modname, stuff) except Exception, e: msg = ('Failure trying to import module "%s" (%s): %s' % (self.name, self.executable, e)) raise epylog.ModuleError(msg, logger) else: msg = ('Could not find module "%s" in dir "%s"' % (modname, dirname)) raise epylog.ModuleError(msg, logger) logger.endhang(3) try: modclass = getattr(module, modname) self.epymod = modclass(self.extraopts, logger) except AttributeError: msg = 'Could not instantiate class "%s" in module "%s"' msg = msg % (modname, self.executable) raise epylog.ModuleError(msg, logger) logger.put(3, 'Opening "%s" for writing' % self.logfilter) self.filtfh = open(self.logfilter, 'w+') logger.put(5, 'Module.message_match') handler = None match_regex = None for regex in self.epymod.regex_map.keys(): if regex.search(message): logger.put(5, 'match: %s' % message) logger.put(5, 'matching module: %s' % self.name) match_regex = regex handler = self.epymod.regex_map[regex] break logger.put(5, 'Module.put_filtered') self.filtfh.write(line) logger.put(3, 'Wrote "%s" into filtfh' % line) logger.put(5, 'Module.no_report') self.logreport = None self.logfilter = None self.close_filtered() self.logger.put(5, 'Module.close_filtered') self.filtfh.close() logger.put(5, 'Module.finalize_processing') logger.put(3, 'Finalizing for module "%s"' % self.name) if self.filtfh.tell(): if not rs.is_empty(): logger.put(3, 'Finalizing the processing') report = self.epymod.finalize(rs) if report: logger.put(5, 'Report follows:') logger.put(5, report) repfh = open(self.logreport, 'w') repfh.write(report) repfh.close() else: self.logreport = None self.logfilter = None else: logger.put(3, 'No filtered strings for this module') self.logreport = None self.logfilter = None self.close_filtered() logger.put(3, 'Done with this module, deleting') del self.epymod logger.put(5, 'Module._invoke_external_module') logger.put(3, 'Dumping strings into "%s"' % self.logdump) totallen = self._dump_log_strings(self.logdump) if totallen == 0: logger.put(3, 'Nothing in the logs for this module. Passing exec') return logger.put(5, 'Setting LOGCAT to "%s"' % self.logdump) os.putenv('LOGCAT', self.logdump) modtmpprefix = os.path.join(self.tmpprefix, 'EPYLOG') logger.put(5, 'Setting TMPPREFIX env var to "%s"' % modtmpprefix) os.putenv('TMPPREFIX', modtmpprefix) logger.put(5, 'Setting CONFDIR env var to "%s"' % cfgdir) os.putenv('CONFDIR', cfgdir) if logger.is_quiet(): logger.put(2, 'This line will never be seen. 
:)') os.putenv('QUIET', 'YES') logger.put(5, 'Setting DEBUG to "%s"' % logger.debuglevel()) os.putenv('DEBUG', logger.debuglevel()) logger.put(5, 'Setting LOGREPORT to "%s"' % self.logreport) logger.put(5, 'Setting LOGFILTER to "%s"' % self.logfilter) os.putenv('LOGREPORT', self.logreport) os.putenv('LOGFILTER', self.logfilter) if len(self.extraopts): logger.put(5, 'Setting extra options') for extraopt in self.extraopts.keys(): optname = string.upper(extraopt) optval = self.extraopts[extraopt] logger.put(5, 'Setting %s to "%s"' % (optname, optval)) os.putenv(optname, optval) logger.put(3, 'Invoking "%s"' % self.executable) exitcode = os.system(self.executable) logger.put(3, 'External module finished with code "%d"' % exitcode) if exitcode and exitcode != 256: msg = ('External module "%s" exited abnormally (exit code %d)' % (self.executable, exitcode)) raise epylog.ModuleError(msg, logger) logger.put(3, 'Checking if we have the report') if not os.access(self.logreport, os.R_OK): logger.put(3, 'Report %s does not exist!' % self.logreport) self.logreport = None logger.put(3, 'Checking if we have the filtered strings') if not os.access(self.logfilter, os.R_OK): logger.put(3, 'Filtered file %s does not exist!' % self.logfilter) self.logfilter = None logger.put(5, 'Module.sanity_check') logger.put(3, 'Checking if executable "%s" is sane' % self.executable) if not os.access(self.executable, os.F_OK): msg = ('Executable "%s" for module "%s" does not exist' % (self.executable, self.name)) raise epylog.ModuleError(msg, logger) if not self.is_internal(): if not os.access(self.executable, os.X_OK): msg = ('Executable "%s" for module "%s" is not set to execute' % (self.executable, self.name)) raise epylog.ModuleError(msg, logger) logger.put(5, 'Module.get_html_report') if self.logreport is None: logger.put(3, 'No report from this module') return None logger.put(3, 'Getting the report from "%s"' % self.logreport) if not os.access(self.logreport, os.R_OK): msg = 'Log report from module "%s" is missing' % self.name raise epylog.ModuleError(msg, logger) logger.puthang(3, 'Reading the report from file "%s"' % self.logreport) fh = open(self.logreport) report = fh.read() fh.close() logger.endhang(3, 'done') if len(report): if not self.outhtml: logger.put(3, 'Report is not html') report = self._make_into_html(report) else: report = None logger.put(5, 'Module.get_filtered_strings_fh') if self.logfilter is None: logger.put(3, 'No filtered strings from this module') return None logger.put(3, 'Opening filtstrings file "%s"' % self.logfilter) if not os.access(self.logfilter, os.R_OK): msg = 'Filtered strings file for "%s" is missing' % self.name raise epylog.ModuleError(msg, logger) fh = open(self.logfilter) logger.put(5, 'Module._dump_log_strings') logger.put(5, 'filename=%s' % filename) logger.put(3, 'Opening the "%s" for writing' % filename) fh = open(filename, 'w') len = 0 for log in self.logs: len = len + log.dump_strings(fh) logger.put(3, 'Total length of the log is "%d"' % len) logger.put(5, '" and escaping the control chars. """ logger = self.logger logger.put(5, '>Module._make_into_html') logger.put(3, 'Regexing entities') report = re.sub(re.compile('&'), '&', report) report = re.sub(re.compile('<'), '<', report) report = re.sub(re.compile('>'), '>', report) report = '
<pre>\n%s\n</pre>
' % report logger.put(5, '= 0: # flags read successfully, modify flags |= _fcntl.FD_CLOEXEC _fcntl.fcntl(fd, _fcntl.F_SETFD, flags) except (ImportError, AttributeError): def _set_cloexec(fd): pass try: import thread as _thread _allocate_lock = _thread.allocate_lock except (ImportError, AttributeError): class _allocate_lock: def acquire(self): pass release = acquire _text_openflags = _os.O_RDWR | _os.O_CREAT | _os.O_EXCL if hasattr(_os, 'O_NOINHERIT'): _text_openflags |= _os.O_NOINHERIT if hasattr(_os, 'O_NOFOLLOW'): _text_openflags |= _os.O_NOFOLLOW _bin_openflags = _text_openflags if hasattr(_os, 'O_BINARY'): _bin_openflags |= _os.O_BINARY if hasattr(_os, 'TMP_MAX'): TMP_MAX = _os.TMP_MAX else: TMP_MAX = 10000 template = "tmp" tempdir = None # Internal routines. _once_lock = _allocate_lock() class _RandomNameSequence: """An instance of _RandomNameSequence generates an endless sequence of unpredictable strings which can safely be incorporated into file names. Each string is six characters long. Multiple threads can safely use the same instance at the same time. _RandomNameSequence is an iterator.""" characters = ("abcdefghijklmnopqrstuvwxyz" + "ABCDEFGHIJKLMNOPQRSTUVWXYZ" + "0123456789-_") def __init__(self): self.mutex = _allocate_lock() self.rng = _Random() self.normcase = _os.path.normcase def __iter__(self): return self def next(self): m = self.mutex c = self.characters choose = self.rng.choice m.acquire() try: letters = [choose(c) for dummy in "123456"] finally: m.release() return self.normcase(''.join(letters)) def _candidate_tempdir_list(): """Generate a list of candidate temporary directories which _get_default_tempdir will try.""" dirlist = [] # First, try the environment. for envname in 'TMPDIR', 'TEMP', 'TMP': dirname = _os.getenv(envname) if dirname: dirlist.append(dirname) # Failing that, try OS-specific locations. if _os.name == 'mac': try: refnum, dirid = _macfs.FindFolder(_MACFS.kOnSystemDisk, _MACFS.kTemporaryFolderType, 1) dirname = _macfs.FSSpec((refnum, dirid, '')).as_pathname() dirlist.append(dirname) except _macfs.error: pass elif _os.name == 'riscos': dirname = _os.getenv('Wimp$ScrapDir') if dirname: dirlist.append(dirname) elif _os.name == 'nt': dirlist.extend([ r'c:\temp', r'c:\tmp', r'\temp', r'\tmp' ]) else: dirlist.extend([ '/tmp', '/var/tmp', '/usr/tmp' ]) # As a last resort, the current directory. try: dirlist.append(_os.getcwd()) except (AttributeError, _os.error): dirlist.append(_os.curdir) return dirlist def _get_default_tempdir(): """Calculate the default directory to use for temporary files. This routine should be called exactly once. We determine whether or not a candidate temp dir is usable by trying to create and write to a file in that directory. If this is successful, the test file is deleted. To prevent denial of service, the name of the test file must be randomized.""" namer = _RandomNameSequence() dirlist = _candidate_tempdir_list() flags = _text_openflags for dir in dirlist: if dir != _os.curdir: dir = _os.path.normcase(_os.path.abspath(dir)) # Try only a few names per directory. 
for seq in xrange(100): name = namer.next() filename = _os.path.join(dir, name) try: fd = _os.open(filename, flags, 0600) fp = _os.fdopen(fd, 'w') fp.write('blat') fp.close() _os.unlink(filename) del fp, fd return dir except (OSError, IOError), e: if e[0] != _errno.EEXIST: break # no point trying more names in this directory pass raise IOError, (_errno.ENOENT, ("No usable temporary directory found in %s" % dirlist)) _name_sequence = None def _get_candidate_names(): """Common setup sequence for all user-callable interfaces.""" global _name_sequence if _name_sequence is None: _once_lock.acquire() try: if _name_sequence is None: _name_sequence = _RandomNameSequence() finally: _once_lock.release() return _name_sequence def _mkstemp_inner(dir, pre, suf, flags): """Code common to mkstemp, TemporaryFile, and NamedTemporaryFile.""" names = _get_candidate_names() for seq in xrange(TMP_MAX): name = names.next() file = _os.path.join(dir, pre + name + suf) try: fd = _os.open(file, flags, 0600) _set_cloexec(fd) return (fd, file) except OSError, e: if e.errno == _errno.EEXIST: continue # try again raise raise IOError, (_errno.EEXIST, "No usable temporary file name found") # User visible interfaces. def gettempprefix(): """Accessor for tempdir.template.""" return template tempdir = None def gettempdir(): """Accessor for tempdir.tempdir.""" global tempdir if tempdir is None: _once_lock.acquire() try: if tempdir is None: tempdir = _get_default_tempdir() finally: _once_lock.release() return tempdir def mkstemp(suffix="", prefix=template, dir=None, text=None): """mkstemp([suffix, [prefix, [dir, [text]]]]) User-callable function to create and return a unique temporary file. The return value is a pair (fd, name) where fd is the file descriptor returned by os.open, and name is the filename. If 'suffix' is specified, the file name will end with that suffix, otherwise there will be no suffix. If 'prefix' is specified, the file name will begin with that prefix, otherwise a default prefix is used. If 'dir' is specified, the file will be created in that directory, otherwise a default directory is used. If 'text' is specified and true, the file is opened in text mode. Else (the default) the file is opened in binary mode. On some operating systems, this makes no difference. The file is readable and writable only by the creating user ID. If the operating system uses permission bits to indicate whether a file is executable, the file is executable by no one. The file descriptor is not inherited by children of this process. Caller is responsible for deleting the file when done with it. """ if dir is None: dir = gettempdir() if text: flags = _text_openflags else: flags = _bin_openflags return _mkstemp_inner(dir, prefix, suffix, flags) def mkdtemp(suffix="", prefix=template, dir=None): """mkdtemp([suffix, [prefix, [dir]]]) User-callable function to create and return a unique temporary directory. The return value is the pathname of the directory. Arguments are as for mkstemp, except that the 'text' argument is not accepted. The directory is readable, writable, and searchable only by the creating user. Caller is responsible for deleting the directory when done with it. 
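
    A minimal usage sketch (the 'epylog.' prefix is only an example):

        import shutil
        tmpdir = mkdtemp(prefix='epylog.')
        try:
            pass    # ... create temporary files inside tmpdir ...
        finally:
            shutil.rmtree(tmpdir)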
""" if dir is None: dir = gettempdir() names = _get_candidate_names() for seq in xrange(TMP_MAX): name = names.next() file = _os.path.join(dir, prefix + name + suffix) try: _os.mkdir(file, 0700) return file except OSError, e: if e.errno == _errno.EEXIST: continue # try again raise raise IOError, (_errno.EEXIST, "No usable temporary directory name found") def mktemp(suffix="", prefix=template, dir=None): """mktemp([suffix, [prefix, [dir]]]) User-callable function to return a unique temporary file name. The file is not created. Arguments are as for mkstemp, except that the 'text' argument is not accepted. This function is unsafe and should not be used. The file name refers to a file that did not exist at some point, but by the time you get around to creating it, someone else may have beaten you to the punch. """ ## from warnings import warn as _warn ## _warn("mktemp is a potential security risk to your program", ## RuntimeWarning, stacklevel=2) if dir is None: dir = gettempdir() names = _get_candidate_names() for seq in xrange(TMP_MAX): name = names.next() file = _os.path.join(dir, prefix + name + suffix) if not _os.path.exists(file): return file raise IOError, (_errno.EEXIST, "No usable temporary filename found") class _TemporaryFileWrapper: """Temporary file wrapper This class provides a wrapper around files opened for temporary use. In particular, it seeks to automatically remove the file when it is no longer needed. """ def __init__(self, file, name): self.file = file self.name = name self.close_called = 0 def __getattr__(self, name): file = self.__dict__['file'] a = getattr(file, name) if type(a) != type(0): setattr(self, name, a) return a # NT provides delete-on-close as a primitive, so we don't need # the wrapper to do anything special. We still use it so that # file.name is useful (i.e. not "(fdopen)") with NamedTemporaryFile. if _os.name != 'nt': # Cache the unlinker so we don't get spurious errors at # shutdown when the module-level "os" is None'd out. Note # that this must be referenced as self.unlink, because the # name TemporaryFileWrapper may also get None'd out before # __del__ is called. unlink = _os.unlink def close(self): if not self.close_called: self.close_called = 1 self.file.close() self.unlink(self.name) def __del__(self): self.close() def NamedTemporaryFile(mode='w+b', bufsize=-1, suffix="", prefix=template, dir=None): """Create and return a temporary file. Arguments: 'prefix', 'suffix', 'dir' -- as for mkstemp. 'mode' -- the mode argument to os.fdopen (default "w+b"). 'bufsize' -- the buffer size argument to os.fdopen (default -1). The file is created as mkstemp() would do it. Returns a file object; the name of the file is accessible as file.name. The file will be automatically deleted when it is closed. """ if dir is None: dir = gettempdir() if 'b' in mode: flags = _bin_openflags else: flags = _text_openflags # Setting O_TEMPORARY in the flags causes the OS to delete # the file when it is closed. This is only supported by Windows. if _os.name == 'nt': flags |= _os.O_TEMPORARY (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) file = _os.fdopen(fd, mode, bufsize) return _TemporaryFileWrapper(file, name) if _os.name != 'posix' or _os.sys.platform == 'cygwin': # On non-POSIX and Cygwin systems, assume that we cannot unlink a file # while it is open. TemporaryFile = NamedTemporaryFile else: def TemporaryFile(mode='w+b', bufsize=-1, suffix="", prefix=template, dir=None): """Create and return a temporary file. 
Arguments: 'prefix', 'suffix', 'directory' -- as for mkstemp. 'mode' -- the mode argument to os.fdopen (default "w+b"). 'bufsize' -- the buffer size argument to os.fdopen (default -1). The file is created as mkstemp() would do it. Returns a file object. The file has no name, and will cease to exist when it is closed. """ if dir is None: dir = gettempdir() if 'b' in mode: flags = _bin_openflags else: flags = _text_openflags (fd, name) = _mkstemp_inner(dir, prefix, suffix, flags) try: _os.unlink(name) return _os.fdopen(fd, mode, bufsize) except: _os.close(fd) raise epylog/py/epylog/publishers.py0000644000175000017500000005745612527655413016061 0ustar tiagotiago""" This module is used to publish the report into a set of predefined publisher classes. You can write your own, as long as they contain the __init__ and publish methods. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Ryabitsev # @version $Date$ # import epylog import os import re import socket import time import shutil import gzip import tempfile if 'mkdtemp' not in dir(tempfile): ## # Must be python < 2.3 # del tempfile import mytempfile as tempfile def make_html_page(template, starttime, endtime, title, module_reports, unparsed, logger): """ Make a html page out of a set of parameters, which include module reports. Used by most, if not all, publishers. """ logger.put(5, '>make_html_page') logger.put(3, 'Making a standard report page') fmtstr = re.sub(re.compile('%'), '%%', template) fmtstr = re.sub(re.compile('@@STARTTIME@@'), '%(starttime)s', fmtstr) fmtstr = re.sub(re.compile('@@ENDTIME@@'), '%(endtime)s', fmtstr) fmtstr = re.sub(re.compile('@@TITLE@@'), '%(title)s', fmtstr) fmtstr = re.sub(re.compile('@@HOSTNAME@@'), '%(hostname)s', fmtstr) fmtstr = re.sub(re.compile('@@MODULE_REPORTS@@'), '%(allrep)s', fmtstr) fmtstr = re.sub(re.compile('@@UNPARSED_STRINGS@@'), '%(unparsed)s', fmtstr) fmtstr = re.sub(re.compile('@@VERSION@@'), '%(version)s', fmtstr) logger.put(5, 'fmtstr=%s' % fmtstr) valumap = {} valumap['starttime'] = starttime valumap['endtime'] = endtime valumap['title'] = title valumap['hostname'] = socket.gethostname() logger.put(3, 'Concatenating the module reports together') allrep = '' for modrep in module_reports: logger.puthang(3, 'Processing report for "%s"' % modrep.name) allrep = '%s\n

<h2>%s</h2>
\n%s' % (allrep, modrep.name, modrep.htmlreport) logger.endhang(3) if allrep == '': allrep = 'No module reports' valumap['allrep'] = allrep if unparsed is not None: logger.put(3, 'Regexing <, > and &') unparsed = re.sub(re.compile('&'), '&', unparsed) unparsed = re.sub(re.compile('<'), '<', unparsed) unparsed = re.sub(re.compile('>'), '>', unparsed) logger.put(3, 'Wrapping unparsed strings into
<pre>')
        unparsed = '<pre>\n%s</pre>
' % unparsed else: unparsed = 'No unparsed strings' valumap['unparsed'] = unparsed valumap['version'] = epylog.VERSION endpage = fmtstr % valumap logger.put(5, 'htmlreport follows:') logger.put(5, endpage) logger.put(5, 'publishers.mail_smtp') import smtplib logger.puthang(3, 'Mailing it via the SMTP server %s' % smtpserv) server = smtplib.SMTP(smtpserv) server.sendmail(fromaddr, toaddr, msg) server.quit() logger.endhang(3) logger.put(5, 'publishers.mail_sendmail') logger.puthang(3, 'Mailing the message via sendmail') p = os.popen(sendmail, 'w') p.write(msg) p.close() logger.endhang(3) logger.put(5, 'MailPublisher.__init__') self.logger = logger self.tmpprefix = config.tmpprefix self.section = sec logger.put(3, 'Looking for required elements in mail method config') try: mailto = config.get(self.section, 'mailto') addrs = mailto.split(',') self.mailto = [] for addr in addrs: addr = addr.strip() logger.put(5, 'adding mailto=%s' % addr) self.mailto.append(addr) except: self.mailto = ['root'] try: format = config.get(self.section, 'format') except: format = 'both' if (format != 'plain') and (format != 'html') and (format != 'both'): msg = ('Format for Mail Publisher must be either "html", "plain",' + ' or "both." Format "%s" is unknown') % format raise epylog.ConfigError(msg, logger) self.format = format if format != 'html': logger.put(3, 'Plaintext version requested. Checking for lynx') try: lynx = config.get(self.section, 'lynx') except: lynx = '/usr/bin/lynx' if not os.access(lynx, os.X_OK): msg = 'Could not find "%s"' % lynx raise epylog.ConfigError(msg, logger) self.lynx = lynx logger.put(3, 'Lynx found in "%s" and is executable' % self.lynx) try: include_rawlogs = config.getboolean(self.section,'include_rawlogs') except: include_rawlogs = 1 if include_rawlogs: try: rawlogs = int(config.get(self.section, 'rawlogs_limit')) except: rawlogs = 200 self.rawlogs = rawlogs * 1024 else: self.rawlogs = 0 try: self.smtpserv = config.get(self.section, 'smtpserv') except: self.smtpserv = 'localhost' logger.put(5, 'format=%s' % self.format) logger.put(5, 'rawlogs=%d' % self.rawlogs) logger.put(5, 'smtpserv=%s' % self.smtpserv) try: self.gpg_encrypt = config.getboolean(self.section, 'gpg_encrypt') try: self.gpg_keyringdir = config.get(self.section, 'gpg_keyringdir') except: self.gpg_keyringdir = None try: gpg_recipients = config.get(self.section, 'gpg_recipients') keyids = gpg_recipients.split(',') self.gpg_recipients = [] for keyid in keyids: keyid = keyid.strip() logger.put(5, 'adding gpg_recipient=%s' % keyid) self.gpg_recipients.append(keyid) except: # Will use all recipients found in the keyring self.gpg_recipients = None try: gpg_signers = config.get(self.section, 'gpg_signers') keyids = gpg_signers.split(',') self.gpg_signers = [] for keyid in keyids: keyid = keyid.strip() logger.put(5, 'adding gpg_signer=%s' % keyid) self.gpg_signers.append(keyid) except: self.gpg_signers = None except: self.gpg_encrypt = 0 logger.put(5, 'MailPublisher.publish') logger.puthang(3, 'Creating a standard html page report') html_report = make_html_page(template, starttime, endtime, title, module_reports, unparsed_strings, logger) self.htmlrep = html_report logger.endhang(3) self.plainrep = None if self.format != 'html': tempfile.tempdir = self.tmpprefix logger.puthang(3, 'Creating a plaintext format of the report') htmlfile = tempfile.mktemp('.html') tfh = open(htmlfile, 'w') tfh.write(html_report) tfh.close() logger.put(3, 'HTML report is in "%s"' % htmlfile) plainfile = tempfile.mktemp('PLAIN') logger.put(3, 
'PLAIN report will go into "%s"' % plainfile) logger.put(3, 'Making a syscall to "%s"' % self.lynx) exitcode = os.system('%s -dump %s > %s 2>/dev/null' % (self.lynx, htmlfile, plainfile)) if exitcode or not os.access(plainfile, os.R_OK): msg = 'Error making a call to "%s"' % self.lynx raise epylog.SysCallError(msg, logger) logger.puthang(3, 'Reading in the plain version') tfh = open(plainfile) self.plainrep = tfh.read() tfh.close() logger.put(5, 'plainrep follows:') logger.put(5, self.plainrep) logger.endhang(3) logger.endhang(3) if self.rawlogs: ## # GzipFile doesn't work with StringIO. :/ Bleh. # tempfile.tempdir = self.tmpprefix outfh = open(tempfile.mktemp('GZIP'), 'w+') do_chunked_gzip(rawfh, outfh, 'rawlogs', logger) size = outfh.tell() if size > self.rawlogs: logger.put(1, '%d is over the defined max of "%d"' % (size, self.rawlogs)) logger.put(1, 'Not attaching the raw logs') self.rawlogs = 0 else: logger.put(5, 'Reading in the gzipped logs') outfh.seek(0) self.gzlogs = outfh.read() outfh.close() logger.puthang(3, 'Creating an email message') from email.mime.base import MIMEBase from email.mime.text import MIMEText from email.mime.multipart import MIMEMultipart logger.put(5, 'Creating a main header') root_part = MIMEMultipart('mixed') root_part.preamble = 'This is a multi-part message in MIME format.' logger.put(5, 'Creating the text/plain part') text_part = MIMEText(self.plainrep, 'plain', 'utf-8') logger.put(5, 'Creating the text/html part') html_part = MIMEText(self.htmlrep, 'html', 'utf-8') if self.rawlogs > 0: logger.put(5, 'Creating the application/x-gzip part') attach_part = MIMEBase('application', 'x-gzip') attach_part.set_payload(self.gzlogs) from email.encoders import encode_base64 logger.put(5, 'Encoding the gzipped raw logs with base64') encode_base64(attach_part) attach_part.add_header('Content-Disposition', 'attachment', filename='raw.log.gz') if self.format == 'both': # create another multipart for text+html alt_part = MIMEMultipart('alternative') alt_part.attach(text_part) alt_part.attach(html_part) root_part.attach(alt_part) elif self.format == 'html': root_part.attach(html_part) elif self.format == 'plain': root_part.attach(text_part) if self.rawlogs > 0: root_part.attach(attach_part) logger.endhang(3) if self.gpg_encrypt: logger.puthang(3, 'Encrypting the message') from StringIO import StringIO try: import gpgme if self.gpg_keyringdir and os.path.exists(self.gpg_keyringdir): logger.put(5, 'Setting keyring dir to %s' % self.gpg_keyringdir) os.environ['GNUPGHOME'] = self.gpg_keyringdir msg = root_part.as_string() logger.put(5, 'Cleartext follows') logger.put(5, msg) logger.put(5, 'Cleartext ends') cleartext = StringIO(msg) ciphertext = StringIO() ctx = gpgme.Context() ctx.armor = True recipients = [] signers = [] logger.put(5, 'self.gpg_recipients = %s' % self.gpg_recipients) logger.put(5, 'self.gpg_signers = %s' % self.gpg_signers) if self.gpg_recipients is not None: for recipient in self.gpg_recipients: logger.puthang(5, 'Looking for an encryption key for %s' % recipient) recipients.append(ctx.get_key(recipient)) logger.endhang(5) else: for key in ctx.keylist(): for subkey in key.subkeys: if subkey.can_encrypt: logger.put(5, 'Found can_encrypt key=%s' % subkey.keyid) recipients.append(key) break if self.gpg_signers is not None: for signer in self.gpg_signers: logger.puthang(5, 'Looking for a signing key for %s' % signer) signers.append(ctx.get_key(signer)) logger.endhang(5) if len(signers) > 0: logger.puthang(3, 'Encrypting and signing the report') ctx.signers = 
signers ctx.encrypt_sign(recipients, gpgme.ENCRYPT_ALWAYS_TRUST, cleartext, ciphertext) logger.endhang(3) else: logger.puthang(3, 'Encrypting the report') ctx.encrypt(recipients, gpgme.ENCRYPT_ALWAYS_TRUST, cleartext, ciphertext) logger.endhang(3) logger.puthang(5, 'Creating the MIME envelope for PGP') gpg_envelope_part = MIMEMultipart('encrypted') gpg_envelope_part.set_param('protocol', 'application/pgp-encrypted', header='Content-Type') gpg_envelope_part.preamble = 'This is an OpenPGP/MIME encrypted message (RFC 2440 and 3156)' gpg_mime_version_part = MIMEBase('application', 'pgp-encrypted') gpg_mime_version_part.add_header('Content-Disposition', 'PGP/MIME version identification') gpg_mime_version_part.set_payload('Version: 1') gpg_payload_part = MIMEBase('application', 'octet-stream', name='encrypted.asc') gpg_payload_part.add_header('Content-Disposition', 'OpenPGP encrypted message') gpg_payload_part.add_header('Content-Disposition', 'inline', filename='encrypted.asc') gpg_payload_part.set_payload(ciphertext.getvalue()) gpg_envelope_part.attach(gpg_mime_version_part) gpg_envelope_part.attach(gpg_payload_part) # envelope becomes the new root part root_part = gpg_envelope_part logger.endhang(5) except ImportError: logger.endhang(3) logger.put(0, 'Install pygpgme for GPG encryption support.') logger.put(0, 'Not mailing the report out of caution.') return logger.endhang(3) root_part['Subject'] = title root_part['To'] = ', '.join(self.mailto) root_part['X-Mailer'] = epylog.VERSION logger.put(5, 'Creating the message as string') msg = root_part.as_string() logger.put(5, 'Message follows') logger.put(5, msg) logger.put(5, 'End of message') logger.put(3, 'Figuring out if we are using sendmail or smtplib') if re.compile('^/').search(self.smtpserv): mail_sendmail(self.smtpserv, msg, logger) else: fromaddr = 'root@%s' % socket.gethostname() mail_smtp(self.smtpserv, fromaddr, self.mailto, msg, logger) logger.put(1, 'Mailed the report to: %s' % ','.join(self.mailto)) logger.put(5, 'FilePublisher.__init__') self.logger = logger self.tmpprefix = config.tmpprefix logger.put(3, 'Looking for required elements in file method config') msg = 'Required attribute "%s" not found' try: expire = int(config.get(sec, 'expire_in')) except: epylog.ConfigError(msg % 'expire_in', logger) try: dirmask = config.get(sec, 'dirmask') except: epylog.ConfigError(msg % 'dirmask', logger) try: filemask = config.get(sec, 'filemask') except: epylog.ConfigError(msg % 'filemask', logger) logger.put(3, 'Verifying dirmask and filemask') msg = 'Invalid mask for %s: %s' try: self.dirname = time.strftime(dirmask, time.localtime()) except: epylog.ConfigError(msg % ('dirmask', dirmask), logger) try: path = config.get(sec, 'path') except: epylog.ConfigError(msg % 'path', logger) try: self.filename = time.strftime(filemask, time.localtime()) except: epylog.ConfigError(msg % ('filemask', filemask), logger) self._prune_old(path, dirmask, expire) self.path = os.path.join(path, self.dirname) logger.put(3, 'Looking if we should save rawlogs') try: self.save_rawlogs = config.getboolean(sec, 'save_rawlogs') except: self.save_rawlogs = 0 if self.save_rawlogs: logger.put(3, 'Saving raw logs in the reports directory') logger.put(3, 'Checking if notify is set') self.notify = [] try: notify = config.get(sec, 'notify') for addy in notify.split(','): addy = addy.strip() logger.put(3, 'Will notify: %s' % addy) self.notify.append(addy) except: pass try: self.smtpserv = config.get(sec, 'smtpserv') except: self.smtpserv = '/usr/sbin/sendmail -t' if 
self.notify: try: self.pubroot = config.get(sec, 'pubroot') logger.put(5, 'pubroot=%s' % self.pubroot) except: msg = 'File publisher requires a pubroot when notify is set' raise epylog.ConfigError(msg, logger) logger.put(5, 'path=%s' % self.path) logger.put(5, 'filename=%s' % self.filename) logger.put(5, 'FilePublisher._prune_old') logger.put(3, 'Pruning directories older than %d days' % expire) expire_limit = int(time.time()) - (86400 * expire) logger.put(5, 'expire_limit=%d' % expire_limit) if not os.path.isdir(path): logger.put(3, 'Dir %s not found -- skipping pruning' % path) logger.put(5, 'FilePublisher.publish') logger.put(3, 'Checking and creating the report directories') if not os.path.isdir(self.path): try: os.makedirs(self.path) except OSError, e: logger.put(0, 'Error creating directory "%s": %s' % (self.path, e)) logger.put(0, 'File publisher exiting.') return logger.puthang(3, 'Creating a standard html page report') html_report = make_html_page(template, starttime, endtime, title, module_reports, unparsed_strings, logger) logger.endhang(3) filename = '%s.html' % self.filename repfile = os.path.join(self.path, filename) logger.put(3, 'Dumping the report into %s' % repfile) fh = open(repfile, 'w') fh.write(html_report) fh.close() logger.put(1, 'Report saved in: %s' % self.path) if self.notify: logger.puthang(3, 'Creating an email message') publoc = '%s/%s/%s' % (self.pubroot, self.dirname, filename) from email.mime.text import MIMEText eml = MIMEText('New Epylog report is available at:\r\n%s' % publoc) eml['Subject'] = '%s (report notification)' % title eml['To'] = ', '.join(self.notify) eml['X-Mailer'] = epylog.VERSION msg = eml.as_string() logger.put(3, 'Figuring out if we are using sendmail or smtplib') if re.compile('^/').search(self.smtpserv): mail_sendmail(self.smtpserv, msg, logger) else: fromaddr = 'root@%s' % socket.gethostname() mail_smtp(self.smtpserv, fromaddr, self.notify, msg, logger) logger.put(1, 'Notification mailed to: %s' % ','.join(self.notify)) if self.save_rawlogs: logfilen = '%s.log' % self.filename logfile = os.path.join(self.path, '%s.gz' % logfilen) logger.put(3, 'Gzipping logs and writing them to %s' % logfilen) outfh = open(logfile, 'w+') do_chunked_gzip(rawfh, outfh, logfilen, logger) outfh.close() logger.put(1, 'Gzipped logs saved in: %s' % self.path) logger.put(5, ' # @version $Date$ # import epylog import os import re import time import tempfile if 'mkdtemp' not in dir(tempfile): ## # Must be python < 2.3 # del tempfile import mytempfile as tempfile from publishers import * class ModuleReport: """ A small helper class to hold Module HTML reports. """ def __init__(self, name, htmlreport): self.name = name self.htmlreport = htmlreport class Report: """ This helper class holds the contents of a report before it is published using publisher classes. """ def __init__(self, config, logger): logger.put(5, '>Report.__init__') logger.put(3, 'Starting Report object intialization') self.logger = logger ## # publishers: a tuple of publisher objects # filt_fh: where the filtered strings from modules will go # useful: tells epylog if the report is of any use or not. 
# module_reports: module reports will be put here eventually # self.publishers = [] self.filt_fh = None self.useful = 0 self.module_reports = [] self.tmpprefix = config.tmpprefix self.runtime = time.localtime() sec = 'report' try: title = config.get(sec, 'title') except: title = '@@HOSTNAME@@ system events: @@LOCALTIME@@' try: self.template = config.get(sec, 'template').strip() except: raise epylog.ConfigError('Report template not specified', logger) if not os.access(self.template, os.R_OK): msg = 'Report template "%s" is not readable' % self.template raise epylog.AccessError(msg, logger) try: self.unparsed = config.getboolean(sec, 'include_unparsed') except: self.unparsed = 1 try: publishers = config.get(sec, 'publishers') except: msg = 'No publishers defined in "%s"' % sec raise epylog.ConfigError(msg, logger) logger.put(3, 'Title as defined in config is: "%s"' % title) hregex = re.compile('@@HOSTNAME@@') tregex = re.compile('@@LOCALTIME@@') if hregex.search(title): import socket hostname = socket.gethostname() logger.put(3, 'Regexing @@HOSTNAME@@ into "%s"' % hostname) title = re.sub(hregex, hostname, title) if tregex.search(title): timestr = time.strftime('%c', self.runtime) logger.put(3, 'Regexing @@LOCALTIME@@ into "%s"' % timestr) title = re.sub(tregex, timestr, title) self.title = title logger.put(3, 'Final title is: "%s"' % self.title) logger.put(3, 'template=%s' % self.template) logger.put(3, 'unparsed=%d' % self.unparsed) if self.unparsed: logger.put(3, 'Creating a temporary file for filtered strings') tempfile.tmpdir = self.tmpprefix filen = tempfile.mktemp('FILT') self.filt_fh = open(filen, 'w+') logger.put(3, 'Filtered strings file created in "%s"' % filen) logger.put(3, 'Publishers: "%s"' % publishers) logger.put(3, 'Initializing publishers') for sec in publishers.split(','): sec = sec.strip() logger.put(3, 'Looking for section definition "%s"' % sec) if sec not in config.sections(): message = 'Required publisher section "%s" not found' % sec raise epylog.ConfigError(message, logger) logger.put(3, 'Looking for method declaration') try: method = config.get(sec, 'method') except: msg = 'Publishing method not found in "%s"' % sec raise epylog.ConfigError(msg, logger) logger.put(3, 'Found method "%s"' % method) if method == 'file': publisher = FilePublisher(sec, config, logger) elif method == 'mail': publisher = MailPublisher(sec, config, logger) else: msg = 'Publishing method "%s" not supported' % method raise epylog.ConfigError(msg, logger) self.publishers.append(publisher) logger.put(3, 'Finished with Report object initialization') logger.put(5, 'Report.append_module_report') if len(module_report) > 0: modrep = ModuleReport(module_name, module_report) logger.put(3, 'Appending report for "%s"' % module_name) logger.put(5, 'report follows') logger.put(5, module_report) self.module_reports.append(modrep) self.useful = 1 else: logger.put(3, 'Module report is empty, ignoring') logger.put(5, 'Report.append_filtered_strings') if self.filt_fh is None: logger.put(3, 'No open filt_fh, ignoring') return fsfh.seek(0, 2) if fsfh.tell() != 0: logger.put(3, 'Appending filtered strings from module "%s"' % module_name) logger.put(5, 'Doing chunked read from %s to %s' % (fsfh.name, self.filt_fh.name)) fsfh.seek(0) while 1: chunk = fsfh.read(epylog.CHUNK_SIZE) if len(chunk): self.filt_fh.write(chunk) logger.put(5, 'wrote %d bytes' % len(chunk)) else: logger.put(5, 'EOF reached') break self.useful = 1 else: logger.put(3, 'Filtered Strings are empty, ignoring') logger.put(5, 'Report.set_stamps') 
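##
# Illustrative sketch, not part of the original epylog sources: the
# chunked-copy idiom that Report.append_filtered_strings above relies
# on.  Reading a fixed-size chunk at a time keeps memory usage flat no
# matter how large the filtered-strings file grows.  The helper name
# and the 8192-byte default are assumptions made for this example only
# (epylog itself uses its CHUNK_SIZE constant).
#
def copy_in_chunks(fromfh, tofh, chunk_size=8192):
    """Append the full contents of fromfh to tofh, one chunk at a time."""
    fromfh.seek(0)
    copied = 0
    while 1:
        chunk = fromfh.read(chunk_size)
        if not chunk:
            # EOF reached
            break
        tofh.write(chunk)
        copied = copied + len(chunk)
    return copied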
[self.start_stamp, self.end_stamp] = stamps logger.put(5, 'start_stamp=%d' % self.start_stamp) logger.put(5, 'end_stamp=%d' % self.end_stamp) logger.put(5, 'Report.mk_unparsed_from_raw') if self.filt_fh is None: return None unparsed = '' self.filt_fh.seek(0, 2) if self.filt_fh.tell(): logger.puthang(1, 'Doing memory-friendly grep') tempfile.tempdir = self.tmpprefix weedfh = open(tempfile.mktemp('WEED'), 'w+') self._memory_friendly_grep(rawfh, weedfh) logger.endhang(3) logger.puthang(3, 'Reading in weeded logs') weedfh.seek(0) unparsed = weedfh.read() weedfh.close() logger.endhang(1) logger.put(5, 'Report.publish') if self.filt_fh is not None: if unparsed is None: unparsed = self.mk_unparsed_from_raw(rawfh) else: unparsed = '' logger.puthang(3, 'Reading in the template file "%s"' % self.template) fh = open(self.template) template = fh.read() fh.close() logger.endhang(3) starttime = time.strftime('%c', time.localtime(self.start_stamp)) endtime = time.strftime('%c', time.localtime(self.end_stamp)) for publisher in self.publishers: logger.puthang(3, 'Invoking publisher "%s"' % publisher.name) publisher.publish(template, starttime, endtime, self.title, self.module_reports, unparsed, rawfh) logger.endhang(3) def is_report_useful(self): """ Returns 0 if the report is not useful (no new strings in logs). """ return self.useful def _memory_friendly_grep(self, rawfh, weedfh): """ Utility method to do a memory-friendly grep. Fgrepping huge logfiles may use up a LOT of memory and cause the process to run away. This only uses a certain amount of lines at a time, making sure that only a small fraction of memory is ever used. This is only invoked if external modules were used. """ logger = self.logger logger.put(5, '>Report._memory_friendly_grep') tempfile.tmpdir = self.tmpprefix temp_raw = tempfile.mktemp('TEMPRAW') temp_filt = tempfile.mktemp('TEMPFILT') temp_weed = tempfile.mktemp('TEMPWEED') logger.put(5, 'temp_raw=%s' % temp_raw) logger.put(5, 'temp_filt=%s' % temp_filt) logger.put(5, 'temp_weed=%s' % temp_weed) logger.put(5, 'Kerchunking %s into %s' % (rawfh.name, temp_raw)) temp_rawfh = open(temp_raw, 'w') rawfh.seek(0) while 1: chunk = rawfh.read(epylog.CHUNK_SIZE) if chunk: temp_rawfh.write(chunk) logger.put(5, 'wrote %d bytes' % len(chunk)) else: logger.put(5, 'Reached EOF') break temp_rawfh.close() self.filt_fh.seek(0, 2) filtfh_size = self.filt_fh.tell() logger.put(5, 'filtfh_size=%d' % filtfh_size) self.filt_fh.seek(0) donesize = 0 while 1: logger.put(5, 'new iteration of filt_fh') if self.filt_fh.tell() == filtfh_size: logger.put(5, 'No more lines in filt_fh') break if os.access(temp_weed, os.F_OK): logger.put(5, 'Moving %s to %s' % (temp_weed, temp_raw)) os.rename(temp_weed, temp_raw) try: os.remove(temp_filt) except: pass temp_filtfh = open(temp_filt, 'w') s = self._dump_lines(self.filt_fh, temp_filtfh, epylog.GREP_LINES) temp_filtfh.close() donesize = donesize + s done = (donesize*100)/filtfh_size self._call_fgrep(temp_raw, temp_filt, temp_weed) logger.put(1, '%d%% done' % done) if not os.stat(temp_weed).st_size: logger.put(5, 'Nothing left after weeding') break logger.put(5, 'Reading weeding results from temp_weed') temp_weedfh = open(temp_weed) weedfh.write(temp_weedfh.read()) temp_weedfh.close() logger.put(3, 'Done doing memory friendly grep') logger.put(5, 'Report._dump_lines') logger.put(5, 'reading %d lines from "%s"' % (number, fromfh.name)) chunksize = 0 for i in range(number): line = fromfh.readline() if not line: logger.put(5, 'end of file reached at iter %d' % i) break 
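##
# Illustrative sketch, not part of the original epylog sources: the
# "weeding" step that _memory_friendly_grep above delegates to
# "/bin/fgrep -v -f", expressed in pure Python.  Filtered strings are
# consumed in bounded batches so memory use is proportional to the
# batch size rather than to the whole filtered-strings file.  Like
# fgrep, this matches fixed substrings anywhere in a line.  The names
# and the batch size are assumptions for this example.
#
def weed_lines(raw_lines, filtfh, batch_size=10000):
    """Return the raw lines that contain none of the filtered strings."""
    remaining = list(raw_lines)
    filtfh.seek(0)
    while 1:
        batch = []
        for i in range(batch_size):
            line = filtfh.readline()
            if not line:
                break
            batch.append(line.rstrip('\n'))
        if not batch:
            break
        kept = []
        for rawline in remaining:
            matched = 0
            for pattern in batch:
                if pattern in rawline:
                    matched = 1
                    break
            if not matched:
                kept.append(rawline)
        remaining = kept
    return remaining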
chunksize = chunksize + len(line) tofh.write(line) writenum = i + 1 logger.put(3, 'wrote %d lines into %s' % (writenum, tofh.name)) logger.put(5, 'total size of chunk: %d' % chunksize) logger.put(5, 'Report._call_fgrep') fgrep = '/bin/fgrep -v -f %s %s > %s' % (filt, raw, weed) logger.put(3, 'Calling fgrep with command "%s"' % fgrep) ecode = os.system(fgrep) logger.put(5, 'ecode=%d' % ecode) if ecode and ecode != 256: msg = 'Call to fgrep for weed failed with exit code %d' % ecode raise epylog.SysCallError(msg, logger) logger.put(5, ' # @version $Date$ # import ConfigParser import exceptions import os import shutil import tempfile import re import threading import pwd import socket import sys if 'mkdtemp' not in dir(tempfile): ## # Must be python < 2.3 # del tempfile import mytempfile as tempfile from report import Report from module import Module from log import LogTracker VERSION = 'Epylog-1.0.7' CHUNK_SIZE = 8192 GREP_LINES = 10000 QUEUE_LIMIT = 500 LOG_SPLIT_RE = re.compile(r'(.{15,15})\s+(\S+)\s+(.*)$') SYSLOG_NG_STRIP = re.compile(r'.*[@/]') MESSAGE_REPEATED_RE = re.compile(r'last message repeated (\S+) times') class FormatError(exceptions.Exception): """ This exception is raised when there are problems with the syslog line processed. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!FormatError: %s' % message) self.args = message class ConfigError(exceptions.Exception): """ This exception is raised when there are misconfiguration problems. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!ConfigError: %s' % message) self.args = message class AccessError(exceptions.Exception): """ This exception is raised when there are errors accessing certain components of Epylog, log files, or temporary writing spaces. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!AccessError: %s' % message) self.args = message class OutOfRangeError(exceptions.Exception): """ This happens when Epylog tries to access a line in a logfile that is outside the specified range. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!OutOfRangeError: %s' % message) self.args = message class ModuleError(exceptions.Exception): """ This exception is raised when an Epylog module crashes or otherwise creates a problem. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!ModuleError: %s' % message) self.args = message class SysCallError(exceptions.Exception): """ This exception is raised when a call to a system binary is not successful. Most notable ones are grep (only used with external modules) and lynx/links/w3m. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!SysCallError: %s' % message) self.args = message class NoSuchLogError(exceptions.Exception): """ This exception is raised when Epylog tries to access or initialize a logfile that does not exist. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!NoSuchLogError: %s' % message) self.args = message class EmptyLogError(exceptions.Exception): """ This exception is raised when Epylog finds an empty logfile. """ def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!EmptyLogError: %s' % message) self.args = message class GenericError(exceptions.Exception): """ This exception is raised for all other Epylog conditions. 
""" def __init__(self, message, logger): exceptions.Exception.__init__(self) logger.put(5, '!GenericError: %s' % message) self.args = message class Epylog: """ This is the core class of Epylog. A UI would usually communicate with it an it only. """ def __init__(self, cfgfile, logger): """ UIs may override the included logger, which would be useful for things like a possible GTK interface, a web interface, etc. """ self.logger = logger logger.put(5, '>Epylog.__init__') config = ConfigParser.ConfigParser() logger.puthang(3, 'Reading the config file "%s"' % cfgfile) try: config.read(cfgfile) except: msg = 'Could not read/parse config file "%s"' % cfgfile raise ConfigError(msg, logger) logger.endhang(3) ## # Read in the main configuration # logger.puthang(3, "Reading in main entries") try: self.cfgdir = config.get('main', 'cfgdir') self.vardir = config.get('main', 'vardir') except: msg = 'Could not parse the main config file "%s"' % cfgfile raise ConfigError(msg, logger) logger.put(5, 'cfgdir=%s' % self.cfgdir) logger.put(5, 'vardir=%s' % self.vardir) logger.endhang(3) logger.put(3, 'Checking if we can write to vardir') if not os.access(self.vardir, os.W_OK): msg = 'Write access required for vardir "%s"' % self.vardir raise ConfigError(msg, logger) ## # Set up a safe temp dir # logger.put(3, 'Setting up a temporary directory') try: tmpdir = config.get('main', 'tmpdir') tempfile.tempdir = tmpdir except: pass logger.put(3, 'Creating a safe temporary directory') try: tmpprefix = tempfile.mkdtemp('EPYLOG') except: msg = 'Could not create a safe temp directory in "%s"' % tmpprefix raise ConfigError(msg, logger) self.tmpprefix = tmpprefix tempfile.tempdir = tmpprefix logger.put(3, 'Temporary directory created in "%s"' % tmpprefix) logger.put(3, 'Sticking tmpprefix into config to pass to other objs') config.tmpprefix = self.tmpprefix ## # Create a file for unparsed strings. 
# self.unparsed = tempfile.mktemp('UNPARSED') logger.put(3, 'Unparsed strings will go into %s' % self.unparsed) ## # Get multimatch pref # try: self.multimatch = config.getboolean('main', 'multimatch') except: self.multimatch = 0 logger.put(5, 'multimatch=%d' % self.multimatch) ## # Get threading pref # try: threads = config.getint('main', 'threads') if threads < 2: logger.put(0, 'Threads set to less than 2, fixing') threads = 2 self.threads = threads except: self.threads = 50 logger.put(5, 'threads=%d' % self.threads) ## # Initialize the Report object # logger.puthang(3, 'Initializing the Report') self.report = Report(config, logger) logger.endhang(3) ## # Initialize the LogTracker object # logger.puthang(3, 'Initializing the log tracker object') logtracker = LogTracker(config, logger) self.logtracker = logtracker logger.endhang(3) ## # Process module configurations # self.modules = [] priorities = [] modcfgdir = os.path.join(self.cfgdir, 'modules.d') logger.put(3, 'Checking if module config dir "%s" exists' % modcfgdir) if not os.path.isdir(modcfgdir): msg = 'Module configuration directory "%s" not found' % modcfgdir raise ConfigError(msg, logger) logger.put(3, 'Looking for module configs in %s' % modcfgdir) for file in os.listdir(modcfgdir): cfgfile = os.path.join(modcfgdir, file) if os.path.isfile(cfgfile): logger.put(3, 'Found file: %s' % cfgfile) if not re.compile('\.conf$').search(cfgfile, 1): logger.put(3, 'Not a module config file, skipping.') continue logger.puthang(3, 'Calling the Module init routines') try: module = Module(cfgfile, logtracker, tmpprefix, logger) except (ConfigError, ModuleError), e: msg = 'Module Error: %s' % e logger.put(0, msg) continue logger.endhang(3) if module.enabled: logger.put(3, 'Module "%s" is enabled' % module.name) module.sanity_check() self.modules.append(module) priorities.append(module.priority) else: logger.put(3, 'Module "%s" is not enabled, ignoring' % module.name) else: logger.put(3, '%s is not a regular file, ignoring' % cfgfile) logger.put(3, 'Total of %d modules initialized' % len(self.modules)) if len(self.modules) == 0: raise ConfigError('No modules are enabled. Exiting.', logger) ## # Sort modules by priority # logger.put(3, 'sorting modules by priority') priorities.sort() for module in self.modules: logger.put(3, 'analyzing module: %s' % module.name) for i in range(0, len(priorities)): try: logger.put(5, 'module.priority=%d, priorities[i]=%d' % (module.priority, priorities[i])) except: logger.put(5, 'priorities[i] is module: %s' % priorities[i].name) if module.priority == priorities[i]: priorities[i] = module logger.put(5, 'priorities[i] is now: %s' % module.name) break self.modules = priorities self.imodules = [] self.emodules = [] for module in self.modules: logger.put(5, 'module: %s, priority: %d' % (module.name, module.priority)) if module.is_internal(): self.imodules.append(module) else: self.emodules.append(module) logger.put(5, 'Epylog.process_modules') logger.put(3, 'Finding internal modules') if len(self.imodules): self._process_internal_modules() if len(self.emodules): logger.puthang(3, 'Processing external modules') for module in self.emodules: logger.puthang(1, 'Processing module "%s"' % module.name) try: module.invoke_external_module(self.cfgdir) except ModuleError, e: ## # Module execution error! # Do not die, but provide a visible warning. 
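##
# Illustrative sketch, not part of the original epylog sources: a more
# compact version of the modules.d scan and priority ordering performed
# above.  Only regular files ending in ".conf" are considered, and the
# module objects are sorted by their priority attribute directly, which
# avoids the index juggling done with the separate priorities list.
# Function names are assumptions for this example.
#
import os
import re

def list_module_configs(modcfgdir):
    """Return the .conf files in modcfgdir, ready to hand to Module()."""
    conf_re = re.compile(r'\.conf$')
    cfgfiles = []
    for name in os.listdir(modcfgdir):
        path = os.path.join(modcfgdir, name)
        if os.path.isfile(path) and conf_re.search(path):
            cfgfiles.append(path)
    return cfgfiles

def sort_by_priority(modules):
    """Return the enabled modules ordered by ascending priority."""
    pairs = [(module.priority, module) for module in modules]
    pairs.sort()
    return [module for (priority, module) in pairs]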
# logger.put(0, str(e)) logger.endhang(1, 'done') logger.endhang(3) logger.put(5, 'Epylog.make_report') for module in self.modules: logger.put(3, 'Analyzing reports from module "%s"' % module.name) logger.put(5, 'logerport=%s' % module.logreport) logger.put(5, 'logfilter=%s' % module.logfilter) if module.logreport is None and module.logfilter is None: logger.put(3, 'No output from module "%s"' % module.name) logger.put(3, 'Skipping module "%s"' % module.name) continue logger.put(3, 'Preparing a report for module "%s"' % module.name) module_report = module.get_html_report() if module_report is not None: self.report.append_module_report(module.name, module_report) if self.emodules: ## # We only need filtered strings if we have external modules # fsfh = module.get_filtered_strings_fh() self.report.append_filtered_strings(module.name, fsfh) fsfh.close() self.report.set_stamps(self.logtracker.get_stamps()) logger.put(5, 'Epylog.publish_report') logger.put(3, 'Dumping all log strings into a temp file') tempfile.tempdir = self.tmpprefix rawfh = open(tempfile.mktemp('RAW'), 'w+') logger.put(3, 'RAW strings file created in "%s"' % rawfh.name) self.logtracker.dump_all_strings(rawfh) if not self.emodules: ## # All modules were internal, meaning we have all unparsed # strings in the self.unparsed file. # unparsed = self._get_unparsed() else: unparsed = None self.report.publish(rawfh, unparsed) logger.put(5, 'Epylog._process_internal_modules') logger.puthang(1, 'Processing internal modules') logger.put(3, 'Collecting logfiles used by internal modules') upfh = open(self.unparsed, 'w') logger.put(3, 'Opened unparsed strings file in "%s"' % self.unparsed) logmap = {} for module in self.imodules: for log in module.logs: try: logmap[log.entry].append(module) except KeyError: logmap[log.entry] = [module] logger.put(5, 'logmap follows') logger.put(5, logmap) pq = ProcessingQueue(QUEUE_LIMIT, logger) logger.put(3, 'Starting the processing threads') threads = [] try: while 1: t = ConsumerThread(pq, logger) t.start() threads.append(t) if len(threads) > self.threads: break for entry in logmap.keys(): log = self.logtracker.getlog(entry) if log.is_range_empty(): continue matched = 0 lines = 0 while 1: logger.put(3, 'Getting next line from "%s"' % entry) try: linemap = log.nextline() except FormatError, e: logger.put(5, 'Writing the line to unparsed') upfh.write(str(e)) continue except OutOfRangeError: break lines += 1 logger.put(5, 'We have the following:') logger.put(5, 'line=%s' % linemap['line']) logger.put(5, 'stamp=%d' % linemap['stamp']) logger.put(5, 'system=%s' % linemap['system']) logger.put(5, 'message=%s' % linemap['message']) logger.put(5, 'multiplier=%d' % linemap['multiplier']) match = 0 for module in logmap[entry]: logger.put(5, 'Matching module "%s"' % module.name) message = linemap['message'] handler, regex = module.message_match(message) linemap['regex'] = regex if handler is not None: match = 1 pq.put_linemap(linemap, handler, module) if not self.multimatch: logger.put(5, 'multimatch is not set') logger.put(5, 'Not matching other modules') break matched += match if not match: logger.put(5, 'Writing the line to unparsed') upfh.write(linemap['line']) bartitle = log.entry message = '%d of %d lines parsed' % (matched, lines) logger.endbar(1, bartitle, message) finally: logger.put(3, 'Notifying the threads that they may die now') pq.tell_threads_to_quit(threads) bartitle = 'Waiting for threads to finish' bartotal = len(threads) bardone = 1 for t in threads: logger.progressbar(1, bartitle, bardone, 
bartotal) t.join() bardone += 1 logger.endbar(1, bartitle, 'all threads done') upfh.close() logger.puthang(1, 'Finished all matching, now finalizing') for module in self.imodules: logger.puthang(1, 'Finalizing "%s"' % module.name) try: rs = pq.get_resultset(module) try: module.finalize_processing(rs) except Exception, e: msg = ('Module %s crashed in finalize stage: %s' % (module.name, e)) logger.put(0, msg) module.no_report() except KeyError: module.no_report() logger.endhang(1) logger.endhang(1) logger.endhang(1) logger.put(5, 'ProcessingQueue.__init__') logger.put(3, 'Initializing ProcessingQueue') self.mon = threading.RLock() self.iw = threading.Condition(self.mon) self.ow = threading.Condition(self.mon) self.lineq = [] self.resultsets = {} self.limit = limit self.working = 1 logger.put(5, 'ProcessingQueue.put_linemap') while len(self.lineq) >= self.limit: logger.put(5, 'Line queue is full, waiting...') self.ow.wait() self.lineq.append([linemap, handler, module]) logger.put(3, 'Added a new line in lineq') logger.put(5, 'items in lineq: %d' % len(self.lineq)) self.iw.notify() logger.put(5, 'ProcessingQueue.get_linemap') while not self.lineq and self.working: logger.put(5, 'Line queue is empty, waiting...') self.iw.wait() if self.working: item = self.lineq.pop(0) logger.put(3, 'Got new linemap for the thread.') logger.put(5, 'items in lineq: %d' % len(self.lineq)) self.ow.notify() else: item = None logger.put(5, 'ProcessingQueue.put_result') if result is not None: try: self.resultsets[module].add_result(result) except KeyError: self.resultsets[module] = Result() self.resultsets[module].add_result(result) module.put_filtered(line) logger.put(3, 'Added result from module "%s"' % module.name) else: logger.put(3, '"%s" returned result None. Skipping.' % module.name) logger.put(5, 'ProcessingQueue.get_resultset') rs = self.resultsets[module] self.logger.put(5, 'ProcessingQueue.tell_threads_to_quit') logger.put(1, 'Telling all threads to quit') logger.put(5, 'Waiting till queue is empty') while self.lineq: logger.put(5, 'items in lineq: %d' % len(self.lineq)) self.ow.wait() self.logger.put(5, 'working=0') self.working = 0 logger.put(3, 'Sending %d semaphore notifications' % len(threads)) for t in threads: self.iw.notify() logger.put(5, 'ConsumerThread.__init__') self.logger = logger self.queue = queue logger.put(5, 'ConsumerThread.run') while self.queue.working: logger.put(3, '%s: getting a new linemap' % self.getName()) item = self.queue.get_linemap() if item is not None: linemap, handler, module = item logger.put(3, '%s: calling the handler' % self.getName()) try: result = handler(linemap) if result is not None: line = linemap['line'] logger.put(5, '%s: returning result' % self.getName()) self.queue.put_result(line, result, module) else: logger.put(5, '%s: Result is None.' % self.getName()) except Exception, e: erep = 'Handler crash. Dump follows:\n' erep += ' Thread : %s\n' % self.getName() erep += ' Module : %s\n' % module.executable erep += ' Handler: %s\n' % handler.__name__ erep += ' Error : %s\n' % e erep += ' Line : %s\n' % linemap['line'].strip() erep += 'End Dump' logger.put(0, erep) else: logger.put(5, '%s: Item is none.' % self.getName()) logger.put(3, '%s: I am now dying' % self.getName()) logger.put(5, '') def htmlsafe(self, unsafe): """ Escapes all x(ht)ml control characters. 
""" unsafe = re.sub(self.amp_re, '&', unsafe) unsafe = re.sub(self.lt_re, '<', unsafe) unsafe = re.sub(self.gt_re, '>', unsafe) return unsafe def getuname(self, uid): """get username for a given uid""" uid = int(uid) try: return self._known_uids[uid] except KeyError: pass try: name = pwd.getpwuid(uid)[0] except KeyError: name = "uid=%d" % uid self._known_uids[uid] = name return name def gethost(self, ip_addr): """do reverse lookup on an ip address""" ## # Handle silly fake ipv6 addresses # try: if ip_addr[:7] == '::ffff:': ip_addr = ip_addr[7:] except: pass try: return self._known_hosts[ip_addr] except KeyError: pass try: name = socket.gethostbyaddr(ip_addr)[0] except socket.error: name = ip_addr self._known_hosts[ip_addr] = name return name def get_smm(self, lm): """ Return a systemname, message, and multiplier from a linemap, since these are most commonly needed in a module. """ return (lm['system'], lm['message'], lm['multiplier']) def mk_size_unit(self, size): """ Make a human-readable size unit from a size in bytes. """ ksize = int(size/1024) if ksize: msize = int(ksize/1024) if msize: gsize = int(msize/1024) if gsize: return (gsize, 'GB') return (msize, 'MB') return (ksize, 'KB') return (size, 'Bytes') class Logger: """ A default command-line logger class. Other GUIs should use their own, but fully implement the API. """ indent = ' ' hangmsg = [] hanging = 0 def __init__(self, loglevel): self.loglevel = loglevel def is_quiet(self): """Check if we should be quiet""" if self.loglevel == 0: return 1 else: return 0 def debuglevel(self): """Return the current debug level""" return str(self.loglevel) def put(self, level, message): """Log a message, but only if debug levels are lesser or match""" if (level <= self.loglevel): if self.hanging: self.hanging = 0 print '%s%s' % (self._getindent(), message) def puthang(self, level, message): """ This indents the output, create an easier-to-read debug data. """ if (level <= self.loglevel): print '%sInvoking: "%s"...' % (self._getindent(), message) self.hanging = 1 self.hangmsg.append(message) def endhang(self, level, message='done'): """Must be called after puthang has been put in effect""" if (level <= self.loglevel): hangmsg = self.hangmsg.pop() if self.hanging: self.hanging = 0 print '%s%s...%s' % (self._getindent(), hangmsg, message) else: print '%s(Hanging from "%s")....%s' % (self._getindent(), hangmsg, message) def progressbar(self, level, title, done, total): """ A simple command-line progress bar. """ if level != self.loglevel: return ## # Do some nifty calculations to present the bar # if len(title) > 40: title = title[:40] barwidth = 60 - len(title) - 2 - len(self._getindent()) barmask = "[%-" + str(barwidth) + "s]" if total != 0: bardown = int(barwidth*(float(done)/float(total))) else: bardown = 0 bar = barmask % ("=" * bardown) sys.stdout.write("\r%s%s: %s\r" % (self._getindent(), title, bar)) def endbar(self, level, title, message): """ After the progress bar is no longer useful, let's replace it with something useful. """ if level != self.loglevel: return if not message: print return ## # Do some nifty calculations to present the bar # if len(title) > 40: title = title[:40] barwidth = 60 - len(title) - len(self._getindent()) - 2 message = '[%s]' % message.center(barwidth) sys.stdout.write("\r%s%s: %s\n" % (self._getindent(), title, message)) def _getindent(self): """ Get the indent spaces. 
""" indent = self.indent * len(self.hangmsg) return indent epylog/py/epylog/log.py0000644000175000017500000015322312527655413014447 0ustar tiagotiago""" This operates on logfiles, including looking up strings, repeated data, handling rotated logs, figuring out the dates, etc. """ ## # Copyright (C) 2003 by Duke University # # This program is free software; you can redistribute it and/or # modify it under the terms of the GNU General Public License # as published by the Free Software Foundation; either version 2 # of the License, or (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place - Suite 330, Boston, MA # 02111-1307, USA. # # $Id$ # # @Author Konstantin Riabitsev # @version $Date$ # import epylog import os import re import string import time import tempfile if 'mkdtemp' not in dir(tempfile): ## # Must be python < 2.3 # del tempfile import mytempfile as tempfile def mkmonthmap(): """ The problem with syslog is that it does not log the year when the event has taken place. This makes certain things difficult, including looking up entries based on a timestamp. This function creates a mapping for months to a year. Pad sets how many months ahead of the current one should be considered in this year, and how many in the last year. This function was largely contributed by Michael Stenner. """ pad = 2 months = [] for i in range(0, 12): months.append(time.strftime("%b", (1, i+1, 1, 1, 1, 1, 1, 1, 1))) basetime = time.localtime(time.time()) now_year = basetime[0] now_month = basetime[1] pad_month = now_month + pad monthmap = {} for m in range(pad_month - 12, pad_month): monthname = months[m % 12] year = now_year + (m / 12) monthmap[monthname] = year return monthmap def mkstamp_from_syslog_datestr(datestr, monthmap): """ Takes a syslog date string and makes a timestamp out of it. """ try: (m, d, t) = datestr.split()[:3] y = str(monthmap[m]) datestr = string.join([y, m, d, t], ' ') tuptime = time.strptime(datestr, '%Y %b %d %H:%M:%S') ## # Python 2.2.2 (at least) breaks with DST. # Work around. # localtime = time.localtime(time.mktime(tuptime)) ltime = list(tuptime) ltime[8] = localtime[8] tuptime = tuple(ltime) timestamp = int(time.mktime(tuptime)) except: timestamp = -1 return timestamp def get_stamp_sys_msg(line, monthmap): """ This function takes a syslog line and returns the timestamp of the event, the system where it occured, and the message. """ mo = epylog.LOG_SPLIT_RE.match(line) if not mo: raise ValueError('Unknown line format: %s' % line) time, sys, msg = mo.groups() stamp = mkstamp_from_syslog_datestr(time, monthmap) sys = re.sub(epylog.SYSLOG_NG_STRIP, '', sys) return stamp, sys, msg class LogTracker: """ This is a helper class to track the logfiles as requested by the modules, so no logs are opened more often than needed. It also does tracking of rotating logfiles, opening and initializing them as necessary. """ def __init__(self, config, logger): """ Initializer code. Passing in config so we can use the variables set by the admin. 
""" self.logger = logger logger.put(5, '>LogTracker.__init__') self.tmpprefix = config.tmpprefix self.entries = [] self.logs = [] self.monthmap = mkmonthmap() logger.put(5, 'LogTracker.getlog') logger.put(3, 'Checking if we have a log for entry "%s"' % entry) if entry in self.entries: logger.put(3, 'Yes, returning that log') log = self._get_log_by_entry(entry) else: logger.put(3, 'Logfile for "%s" not yet initialized' % entry) log = self._init_log_by_entry(entry) logger.put(5, 'LogTracker.get_offset_map') omap = [] for log in self.logs: entry = log.entry inode = log.getinode() if log.orange.endix != 0: offset = 0 else: offset = log.orange.end_offset omap.append([entry, inode, offset]) logger.put(5, 'omap follows') logger.put(5, omap) logger.put(5, 'LogTracker.set_offset_by_entry') logger.put(5, 'entry=%s' % entry) logger.put(5, 'inode=%d' % inode) logger.put(5, 'offset=%d' % offset) if entry in self.entries: log = self._get_log_by_entry(entry) if log.getinode() != inode: logger.put(3, 'Inodes do not match. Assuming logrotation') try: log.set_range_param(1, offset, 0) except epylog.OutOfRangeError: logger.put(3, 'No rotated file in place. Set offset to 0') log.set_range_param(0, 0, 0) else: logger.put(3, 'Inodes match, setting offset to "%d"' % offset) log.set_range_param(0, offset, 0) else: msg = 'No such log entry "%s"' % entry raise epylog.NoSuchLogError(msg, logger) logger.put(5, 'LogTracker.dump_all_strings') len = 0 for log in self.logs: logger.put(3, 'Dumping strings for log entry "%s"' % log.entry) len = len + log.dump_strings(fh) logger.put(3, 'Total of %d bytes dumped into "%s"' % (len, fh.name)) logger.put(5, 'LogTracker.get_stamps') start_stamps = [] end_stamps = [] for log in self.logs: if log.is_range_empty(): logger.put(3, 'The range for this log is empty') continue [start_stamp, end_stamp] = log.get_stamps() if start_stamp != 0: start_stamps.append(start_stamp) if end_stamp != 0: end_stamps.append(end_stamp) if len(start_stamps): start_stamps.sort() start_stamp = start_stamps.pop(0) else: start_stamp = 0 if len(end_stamps): end_stamps.sort() end_stamp = end_stamps.pop(-1) else: end_stamp = 0 logger.put(5, 'start_stamp=%d' % start_stamp) logger.put(5, 'end_stamp=%d' % end_stamp) logger.put(5, 'LogTracker.set_offsets_by_timestamp') for log in self.logs: try: log.set_range_by_timestamps(start_stamp, end_stamp) except epylog.OutOfRangeError: msg = 'Timestamps not found for log entry "%s"' % log.entry logger.put(0, msg) logger.put(5, 'LogTracker._init_log_by_entry') logger.puthang(3, 'Initializing log object for entry "%s"' % entry) log = Log(entry, self.tmpprefix, self.monthmap, self.logger) logger.endhang(3) self.entries.append(entry) self.logs.append(log) logger.put(5, 'LogTracker._get_log_by_entry') for log in self.logs: if log.entry == entry: logger.put(3, 'Found log object "%s"' % entry) logger.put(5, 'OffsetRange.__init__') self.startix = startix self.endix = endix self.start_offset = start_offset self.end_offset = end_offset self.total_size = 0 logger.put(5, 'startix=%d' % self.startix) logger.put(5, 'start_offset=%d' % self.start_offset) logger.put(5, 'endix=%d' % self.endix) logger.put(5, 'end_offset=%d' % self.end_offset) logger.put(5, 'OffsetRange.setstart') self.startix = ix self.start_offset = offset logger.put(5, 'new startix=%d' % self.startix) logger.put(5, 'new start_offset=%d' % self.start_offset) self._recalc_total_size(loglist) logger.put(5, 'OffsetRange.setend') def start_is_end(self): """ Check whether the coordinates for start and end are the same and 
return true if so, otherwise return false. """ logger = self.logger logger.put(5, '>OffsetRange.start_is_end') empty = 0 if self.startix == self.endix: if self.start_offset == self.end_offset: empty = 1 logger.put(3, 'This range points to same location') logger.put(5, 'OffsetRange.is_inside') cond = 1 if ix > self.startix: logger.put(5, 'ix > self.startix') cond = 0 elif ix < self.endix: logger.put(5, 'ix > self.endix') cond = 0 elif ix == self.startix and offset < self.start_offset: logger.put(5, 'ix = self.startix and offset < self.start_offset') cond = 0 elif ix == self.endix and offset > self.end_offset: logger.put(5, 'ix = self.endix and offset > self.end_offset') cond = 0 if cond: logger.put(5, 'ix=%d, offset=%d is inside' % (ix, offset)) logger.put(5, 'LinePointer.__init__') self.ix = ix self.offset = offset logger.put(5, 'ix=%d' % self.ix) logger.put(5, 'offset=%d' % self.offset) logger.put(5, 'LinePointer.set') self.ix = ix self.offset = offset logger.put(5, 'ix=%d' % ix) logger.put(5, 'offset=%d' % offset) logger.put(5, 'Log.__init__') logger.puthang(3, 'Initializing Log object for entry "%s"' % entry) self.logger = logger self.tmpprefix = tmpprefix self.monthmap = monthmap self.entry = entry filename = self._get_filename() logger.puthang(3, 'Initializing the logfile "%s"' % filename) self.loglist = [] self.cur_rot_ix = 0 try: logfile = LogFile(filename, tmpprefix, monthmap, logger) logger.put(3, 'Appending logfile to the loglist') self.loglist.append(logfile) except epylog.EmptyLogError: logger.endhang(3) logger.puthang(3, '%s is empty, using the previous rotated log' % filename) self._init_next_rotfile() logfile = self.loglist[0] logger.endhang(3) self.orange = OffsetRange(0, 0, 0, logfile.end_offset, logger) logger.endhang(3) self.lp = None logger.put(5, 'Log.set_range_param') logger.put(5, 'ix=%d' % ix) logger.put(5, 'offset=%d' % offset) logger.put(5, 'whence=%d' % whence) while not self._is_valid_ix(ix): try: self._init_next_rotfile() except epylog.NoSuchLogError: msg = 'Invalid index "%d" for log "%s"' % (ix, self.entry) raise epylog.OutOfRangeError(msg, logger) logger.put(3, 'Checking if the offset makes sense') if self.loglist[ix].end_offset < offset: msg = 'Offset %d is past the end of %s: %d! Correcting.' 
% ( offset, self.loglist[ix].filename, self.loglist[ix].end_offset) logger.put(0, msg) self.orange.setstart(ix, self.loglist[ix].end_offset, self.loglist) else: if whence: logger.put(3, 'Setting range END for entry "%s"' % self.entry) self.orange.setend(ix, offset, self.loglist) else: logger.put(3, 'Setting range START for entry: %s' % self.entry) self.orange.setstart(ix, offset, self.loglist) logger.put(5, 'Log.getinode') logfile = self.loglist[0] inode = logfile.getinode() logger.put(5, 'inode=%d' % inode) logger.put(5, 'Log.nextline') if self.lp is None: ix = self.orange.startix offset = self.orange.start_offset logger.put(5, 'setting init linepointer with ix=%d, offset=%d' % (ix, offset)) self.lp = LinePointer(ix, offset, logger) ix = self.lp.ix offset = self.lp.offset logger.put(3, 'Checking if we are past the orange end') if not self.orange.is_inside(ix, offset): msg = 'Moved past the end of the range' raise epylog.OutOfRangeError(msg, logger) log = self.loglist[ix] line, offset = log.get_line_at_offset(offset) done = self.orange.done_size(ix, offset, self.loglist) total = self.orange.total_size title = log.filename logger.progressbar(1, title, done, total) if offset >= log.end_offset: logger.put(3, 'End of log "%s" reached' % log.filename) ix -= 1 offset = 0 self.lp.set(ix, offset) try: stamp, system, message = get_stamp_sys_msg(line, self.monthmap) multiplier = 1 mo = epylog.MESSAGE_REPEATED_RE.search(message) if mo: try: message = self._lookup_repeated(system) multiplier = int(mo.group(1)) except epylog.FormatError: pass except epylog.GenericError: pass log.repeated_cache[system] = message linemap = {'line': line, 'stamp': stamp, 'system': system, 'message': message, 'multiplier': multiplier} except ValueError: logger.put(0, 'Invalid syslog format string in %s: %s' % (log.filename, line)) # Pass it on raise epylog.FormatError(line, logger) logger.put(5, 'Log._lookup_repeated') log = self.loglist[self.lp.ix] try: message = log.repeated_cache[system] logger.put(3, 'Found in repeated_cache by system') logger.put(5, 'Log.dump_strings') logger.put(3, 'Dumping strings for log entry "%s"' % self.entry) ologs = self._get_orange_logs() if len(ologs) == 1: ## # All strings in the same file. Easy. # starto = self.orange.start_offset endo = self.orange.end_offset log = ologs[0] log.set_offset_range(starto, endo) buflen = log.dump_strings(fh) logger.put(3, '%d bytes dumped from %s into %s' % (buflen, log.filename, fh.name)) else: ## # Strings are in different rotfiles. Hard. 
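##
# Illustrative sketch, not part of the original epylog sources: the
# idea behind the "last message repeated N times" unwrapping done in
# Log.nextline above.  A per-system cache remembers the last real
# message seen, so a "repeated" line can be reported as that message
# with a multiplier instead of verbatim.  The names and the simplified
# regex are assumptions for this example.
#
import re

REPEATED_RE = re.compile(r'last message repeated (\d+) times')

def unwrap_repeated(system, message, last_seen):
    """Return (message, multiplier), consulting and updating last_seen."""
    mo = REPEATED_RE.search(message)
    if mo and system in last_seen:
        # Substitute the cached message and carry the repeat count.
        return (last_seen[system], int(mo.group(1)))
    last_seen[system] = message
    return (message, 1)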
# buflen = 0 flog = ologs.pop(0) elog = ologs.pop(-1) logger.put(3, 'Processing the earliest logfile') starto = self.orange.start_offset endo = flog.end_offset flog.set_offset_range(starto, endo) buflen = buflen + flog.dump_strings(fh) if len(ologs): logger.put(3, 'There are logfiles between the first and last') for mlog in ologs: mlog.set_offset_range(0, mlog.end_offset) buflen = buflen + mlog.dump_strings(fh) logger.put(3, 'Processing the latest logfile') starto = 0 endo = self.orange.end_offset elog.set_offset_range(starto, endo) buflen = buflen + elog.dump_strings(fh) logger.put(3, '%d bytes dumped from multiple rotfiles into "%s"' % (buflen, fh.name)) logger.put(5, 'Log.get_stamps') logs = self._get_orange_logs() flog = logs.pop(0) flog.range_start = self.orange.start_offset [start_stamp, end_stamp] = flog.get_range_stamps() if len(logs): elog = logs.pop(-1) elog.range_end = self.orange.end_offset [junk, end_stamp] = elog.get_range_stamps() logger.put(5, 'start_stamp=%d' % start_stamp) logger.put(5, 'end_stamp=%d' % end_stamp) logger.put(5, 'Log.set_range_by_timestamps') if start_stamp > end_stamp: msg = 'Start stamp must be before end stamp' raise epylog.OutOfRangeError(msg, logger) logger.put(5, 'looking for start_stamp=%d' % start_stamp) logger.put(5, 'looking for end_stamp=%d' % end_stamp) ix = 0 start_offset = None end_offset = None while 1: logger.put(5, 'ix=%d' % ix) try: curlog = self.loglist[ix] except IndexError: logger.put(3, 'This log is not yet initialized') try: curlog = self._init_next_rotfile() except epylog.NoSuchLogError: logger.put(3, 'No more rotated files present') if end_offset is not None: logger.put(5, 'setting start_offset to 0, last ix') start_offset = 0 start_ix = len(self.loglist) - 1 break else: msg = 'Range not found when searching for timestamps' raise epylog.OutOfRangeError(msg, logger) logger.put(3, 'Analyzing log file "%s"' % curlog.filename) try: pos_start = curlog.stamp_in_log(start_stamp) pos_end = curlog.stamp_in_log(end_stamp) except epylog.OutOfRangeError: logger.put(3, 'No useful entries in this log, ignoring') ix = ix + 1 continue if pos_start == 0: ## # In this log # logger.put(5, 'start_stamp is in "%s"' % curlog.filename) start_ix = ix start_offset = curlog.find_offset_by_timestamp(start_stamp) elif pos_start > 0: ## # Past this log. This means that we have missed the start # of this stamp. Set by the end_offset of the current log. # logger.put(5, 'start_stamp is past "%s"' % curlog.filename) logger.put(3, 'setting to end_offset of this log') start_ix = ix start_offset = curlog.end_offset if pos_end == 0: ## # In this log # logger.put(3, 'end_stamp is in "%s"' % curlog.filename) end_ix = ix end_offset = curlog.find_offset_by_timestamp(end_stamp) elif pos_end > 0 and end_offset is None: ## # Means that end of the search is past the end of the last # log. 
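##
# Illustrative sketch, not part of the original epylog sources: the
# three-way comparison that stamp_in_log performs for the range search
# above.  Returning 0 when the stamp falls inside a file, -1 when it is
# before the first entry, and 1 when it is past the last entry is what
# drives the walk across rotated logfiles.  The function name is an
# assumption for this example; the real method also rejects files with
# no parseable timestamps.
#
def classify_stamp(searchstamp, start_stamp, end_stamp):
    """Locate searchstamp relative to a logfile's first and last stamps."""
    if searchstamp < start_stamp:
        return -1   # before this log -- look in an older rotated file
    if searchstamp > end_stamp:
        return 1    # past this log -- it is in a more recent file
    return 0        # somewhere inside this log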
# logger.put(3, 'end_stamp is past the most current entry') logger.put(3, 'setting to end_offset of this ix') end_ix = ix end_offset = curlog.end_offset if start_offset is not None and end_offset is not None: logger.put(3, 'Found both the start and the end') break ix = ix + 1 logger.put(5, 'start_ix=%d' % start_ix) logger.put(5, 'start_offset=%d' % start_offset) logger.put(5, 'end_ix=%d' % end_ix) logger.put(5, 'end_offset=%d' % end_offset) self.orange.setstart(start_ix, start_offset, self.loglist) self.orange.setend(end_ix, end_offset, self.loglist) logger.put(5, 'Log.is_range_empty') empty = 0 if self.orange.start_is_end(): empty = 1 logger.put(3, 'Yes, range is empty') else: startlog = self.loglist[self.orange.startix] if (startlog.end_offset == self.orange.end_offset and self.orange.endix == self.orange.startix + 1 and self.orange.end_offset == 0): ## # This means that start is at the end of the last rotlog # and end is at the start of next rotlog, meaning that the # range is really empty. empty = 1 logger.put(5, 'Log._get_orange_logs') ologs = [] for ix in range(self.orange.startix, self.orange.endix - 1, -1): logger.put(5, 'appending "%s"' % self.loglist[ix].filename) ologs.append(self.loglist[ix]) logger.put(5, 'Log._is_valid_ix') ixlen = len(self.loglist) - 1 isvalid = 1 if ix > ixlen: logger.put(3, 'index %d is not valid' % ix) isvalid = 0 logger.put(5, 'Log._init_next_rotfile') self.cur_rot_ix += 1 rotname = self._get_rotname_by_ix(self.cur_rot_ix) try: logger.put(3, 'Initializing log for rotated file "%s"' % rotname) rotlog = LogFile(rotname, self.tmpprefix, self.monthmap, logger) self.loglist.append(rotlog) except epylog.AccessError: msg = 'No further rotated files for entry "%s"' % self.entry raise epylog.NoSuchLogError(msg, logger) except epylog.EmptyLogError: msg = 'Found an empty rotated log, ignoring it.' rotlog = self._init_next_rotfile() logger.put(5, 'Log._get_rotname_by_ix') logger.put(5, 'ix=%d' % ix) if re.compile('\[/').search(self.entry): ## # Full filename specified in the brackets: # e.g. /var/log/messages[/var/log/rotated/messages.#.gz] # rot_m = re.compile('\[(.*?)\]').search(self.entry) try: rotname = rot_m.group(1) except: msg = ('Could not figure out the rotated filename in "%s"' % self.entry) raise epylog.ConfigError(msg, logger) else: rotname = re.sub(re.compile('\[|\]'), '', self.entry) ## # There may not be any rotfiles specified! # if rotname == self.entry: msg = 'No file-rotation data found in "%s"' % self.entry raise epylog.NoSuchLogError(msg, logger) rotname = re.sub(re.compile('#'), str(ix), rotname) logger.put(5, 'rotname=%s' % rotname) logger.put(5, 'Log._get_filename') logger.put(5, 'entry=%s' % self.entry) filename = re.sub(re.compile('\[.*?\]'), '', self.entry) logger.put(5, 'filename=%s' % filename) logger.put(5, 'LogFile.__init__') self.tmpprefix = tmpprefix self.filename = filename self.monthmap = monthmap ## # start_stamp: the timestamp at the start of the log # end_stamp: the timestamp at the end of the log # end_offset: this is where the end of the log is # self.start_stamp = None self.end_stamp = None self.end_offset = None ## # range_start: the start offset of the range # range_end: the end offset of the range # self.range_start = 0 self.range_end = None ## # repeated_cache: map of offsets to repeated lines for # unwrapping those pesky "last message repeated" # entries # also a map of last lines for systems. 
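##
# Illustrative sketch, not part of the original epylog sources: how the
# bracketed rotation syntax handled by _get_rotname_by_ix above maps a
# log entry plus a rotation index onto a concrete filename.  Both forms
# are covered: "/var/log/messages[.#.gz]" (suffix appended to the base
# name) and "/var/log/messages[/var/spool/old/messages.#.gz]" (full
# path spelled out in the brackets).  The function name is an
# assumption for this example.
#
import re

def rotated_name(entry, ix):
    """Return the rotated filename for entry at rotation index ix."""
    mo = re.compile(r'\[(.*?)\]').search(entry)
    if not mo or not mo.group(1):
        raise ValueError('No file-rotation data found in "%s"' % entry)
    if mo.group(1).startswith('/'):
        # Full filename spelled out inside the brackets.
        rotname = mo.group(1)
    else:
        # Suffix form: drop the brackets, keep the base filename.
        rotname = re.sub(r'\[|\]', '', entry)
    return rotname.replace('#', str(ix))

# e.g. rotated_name('/var/log/messages[.#.gz]', 1)
#      -> '/var/log/messages.1.gz'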
# self.repeated_cache = {} logger.put(3, 'Running sanity checks on the logfile') self._accesscheck() logger.put(3, 'All checks passed') logger.put(3, 'Initializing the file') self._initfile() logger.put(5, 'LogFile._initfile') logger.put(3, 'Checking if we are gzipped (ends in .gz)') if re.compile('\.gz$').search(self.filename, 1): logger.put(3, 'Ends in .gz. Using GzipFile to open') import gzip tempfile.tmpdir = self.tmpprefix ungzfile = tempfile.mktemp('UNGZ') logger.put(3, 'Creating a tempfile in "%s"' % ungzfile) ungzfh = open(tempfile.mktemp('UNGZ'), 'w+') try: gzfh = gzip.open(self.filename) except: raise epylog.ConfigError(('Could not open file "%s" with' + ' gzip handler. Not gzipped?') % self.filename, logger) logger.put(3, 'Putting the contents of the gzlog into ungzlog') while 1: chunk = gzfh.read(epylog.CHUNK_SIZE) if chunk: ungzfh.write(chunk) logger.put(5, 'Read "%s" bytes from gzfh' % len(chunk)) else: logger.put(5, 'Reached EOF') break gzfh.close() self.fh = ungzfh else: logger.put(3, 'Does not end in .gz, assuming plain text') logger.put(3, 'Opening logfile "%s"' % self.filename) self.fh = open(self.filename) logger.put(3, 'Finding the start_stamp') self.fh.seek(0) self.start_stamp = self._get_stamp() logger.put(5, 'start_stamp=%d' % self.start_stamp) logger.put(3, 'Finding the end offset') self.fh.seek(0, 2) self._set_at_line_start() self.end_offset = self.fh.tell() self.range_end = self.fh.tell() logger.put(3, 'Finding the end_stamp') self.end_stamp = self._get_stamp() logger.put(5, 'end_stamp=%d' % self.end_stamp) logger.put(5, 'LogFile.set_offset_range') logger.put(5, 'start=%d' % start) logger.put(5, 'end=%d' % end) if start < 0: msg = 'Start of range cannot be less than zero' raise epylog.OutOfRangeError(msg, logger) if end > self.end_offset: msg = 'End of range "%d" is past the end of log' % end raise epylog.OutOfRangeError(msg, logger) if start > end: msg = 'Start of range cannot be greater than end' raise epylog.OutOfRangeError(msg, logger) self.fh.seek(start) self._set_at_line_start() self.range_start = self.fh.tell() self.fh.seek(end) self._set_at_line_start() self.range_end = self.fh.tell() logger.put(5, 'range_start=%d' % self.range_start) logger.put(5, 'range_end=%d' % self.range_end) logger.put(5, 'LogFile.getinode') inode = os.stat(self.filename).st_ino self.logger.put(5, 'inode=%d' % inode) self.logger.put(5, 'LogFile.stamp_in_log') logger.put(5, 'searchstamp=%d' % searchstamp) logger.put(5, 'start_stamp=%d' % self.start_stamp) logger.put(5, 'end_stamp=%d' % self.end_stamp) if self.start_stamp == 0 or self.end_stamp == 0: msg = 'No stampable entries in this log' raise epylog.OutOfRangeError(msg, logger) if searchstamp > self.end_stamp: logger.put(3, 'past the end of this log') ret = 1 elif searchstamp < self.start_stamp: logger.put(3, 'before the start of this log') ret = -1 elif searchstamp >= self.start_stamp and searchstamp <= self.end_stamp: logger.put(3, 'IN this log') ret = 0 logger.put(5, 'LogFile.find_offset_by_timestamp') if self.start_stamp == 0 or self.end_stamp == 0: logger.put(3, 'Does not seem like anything useful is in this file') raise epylog.OutOfRangeError('Nothing useful in this log', logger) if self.stamp_in_log(searchstamp) != 0: msg = 'This stamp does not appear to be in this log' raise epylog.OutOfRangeError(msg, logger) self._crude_locate(searchstamp) self._fine_locate(searchstamp) offset = self.fh.tell() logger.put(5, 'offset=%d' % offset) logger.put(5, 'LogFile.dump_strings') if self.range_end is None: msg = 'No range defined for 
logfile "%s"' % self.filename raise epylog.OutOfRangeError(msg, logger) chunklen = self.range_end - self.range_start logger.put(5, 'range_start=%d' % self.range_start) logger.put(5, 'range_end=%d' % self.range_end) logger.put(5, 'chunklen=%d' % chunklen) self.fh.seek(self.range_start) if chunklen > 0: iternum = int(chunklen/epylog.CHUNK_SIZE) lastchunk = chunklen%epylog.CHUNK_SIZE logger.put(5, 'iternum=%d' % iternum) logger.put(5, 'lastchunk=%d' % lastchunk) if iternum > 0: for i in range(iternum): chunk = self.fh.read(epylog.CHUNK_SIZE) fh.write(chunk) logger.put(5, 'wrote %d bytes from %s to %s' % (len(chunk), self.filename, fh.name)) if lastchunk > 0: chunk = self.fh.read(lastchunk) fh.write(chunk) logger.put(5, 'wrote %d bytes from %s to %s' % (len(chunk), self.filename, fh.name)) return chunklen def get_range_stamps(self): """ Get the timestamps at the beginning of the range offset, and at the end. """ logger = self.logger logger.put(5, '>LogFile.get_range_stamps') logger.put(5, 'range_start=%s' % self.range_start) self.fh.seek(self.range_start) start_stamp = self._get_stamp() self.fh.seek(self.range_end) end_stamp = self._get_stamp() logger.put(5, 'start_stamp=%d' % start_stamp) logger.put(5, 'end_stamp=%d' % end_stamp) logger.put(5, 'LogFile.get_line_at_offset') self.fh.seek(offset) line = self.fh.readline() offset = self.fh.tell() logger.put(5, 'LogFile.find_previous_entry_by_re') self.fh.seek(offset) count = 0 while 1: line = self._lineback() if regex.search(line): logger.put(5, 'Found line: %s' % line) break count += 1 logger.put(5, 'No match, going further back (count=%d)' % count) if count > limit: logger.put(5, 'Reached backstepping limit') msg = 'Out of sane range looking for line' raise epylog.OutOfRangeError(msg, logger) logger.put(5, 'LogFile._crude_locate') logger.put(3, 'Looking for "%d" in file %s' % (stamp, self.filename)) increment = int(self.end_offset/2) relative = increment logger.put(3, 'rewinding the logfile') self.fh.seek(0) logger.put(5, 'initial increment=%d' % increment) logger.put(5, 'initial relative=%d' % relative) ostamp = None while 1: old_ostamp = ostamp self._rel_position(relative) ostamp = self._get_stamp() if ostamp == 0: logger.put(3, 'Bogus timestamp! Breaking.') break logger.put(5, 'ostamp=%d' % ostamp) if old_ostamp == ostamp: logger.put(3, 'ostamp and old_ostamp the same. Breaking') break increment = int(increment/2) logger.put(5, 'increment=%d' % increment) if ostamp < stamp: logger.put(5, '<<<<<<<') relative = increment logger.put(5, 'Jumping forward by %d' % relative) elif ostamp > stamp: logger.put(5, '>>>>>>>') relative = -increment logger.put(5, 'Jumping backward by %d' % relative) elif ostamp == stamp: logger.put(5, '=======') break logger.put(5, 'Crude search finished at offset %d' % self.fh.tell()) logger.put(5, 'LogFile._fine_locate') lineloc = 0 oldlineloc = 0 before_stamp = None after_stamp = None current_stamp = None while 1: try: if lineloc > 0: logger.put(5, 'Going forward one line') before_stamp = current_stamp current_stamp = after_stamp after_stamp = None self._lineover() elif lineloc < 0: logger.put(5, 'Going back one line') before_stamp = None current_stamp = before_stamp after_stamp = current_stamp self._lineback() offset = self.fh.tell() if offset >= self.end_offset: ## # We have reached the end of the initialized log. # There are possibly entries past this point, but # we can't trust them, as they are appended after the # init and can screw us up. 
# logger.put(3, 'End of initialized log reached, breaking') self.fh.seek(self.end_offset) break if current_stamp is None: current_stamp = self._get_stamp() self.fh.seek(offset) if before_stamp is None: self._lineback() before_stamp = self._get_stamp() self.fh.seek(offset) if after_stamp is None: self._lineover() after_stamp = self._get_stamp() self.fh.seek(offset) except IOError: logger.put(3, 'Either end or start of file reached, breaking') break logger.put(5, 'before_stamp=%d' % before_stamp) logger.put(5, 'current_stamp=%d' % current_stamp) logger.put(5, 'after_stamp=%d' % after_stamp) logger.put(5, 'searching for %d' % stamp) if before_stamp == 0 or current_stamp == 0 or after_stamp == 0: logger.put(5, 'Bogus stamps found. Breaking.') break oldlineloc = lineloc if before_stamp >= stamp: logger.put(5, '>>>>>') lineloc = -1 elif before_stamp < stamp and after_stamp <= stamp: logger.put(5, '<<<<<') lineloc = 1 elif current_stamp < stamp and after_stamp >= stamp: logger.put(5, '<<<<<') lineloc = 1 elif before_stamp < stamp and current_stamp >= stamp: logger.put(5, '=====') break if oldlineloc == -lineloc: ## # fine_locate cannot reverse direction. # If it does, that means that entries are not in order, # which may happen quite frequently on poorly ntpd'd # machines. Get out and hope this is good enough. # logger.put(3, 'Reversed direction. Breaking.') break logger.put(3, 'fine locate finished at offset %d' % self.fh.tell()) logger.put(5, 'LogFile._lineover') offset = self.fh.tell() entry = self.fh.readline() if self.fh.tell() == offset: logger.put(3, 'End of file reached!') raise IOError logger.put(5, 'New offset at %d' % self.fh.tell()) logger.put(5, 'LogFile._lineback') #self._set_at_line_start() if self.fh.tell() <= 1: logger.put(3, 'Start of file reached') raise IOError entry = self._rel_position(-2) logger.put(5, 'New offset at %d' % self.fh.tell()) logger.put(5, 'LogFile._get_stamp') self._set_at_line_start() offset = self.fh.tell() curline = self.fh.readline() self.fh.seek(offset) if len(curline): try: stamp = self._mkstamp_from_syslog_datestr(curline) except epylog.FormatError: logger.put(3, 'Could not figure out the format of this string') logger.put(3, 'Making it 0') stamp = 0 else: raise epylog.EmptyLogError('%s is empty' % self.filename, logger) logger.put(5, 'LogFile._rel_position') offset = self.fh.tell() new_offset = offset + relative logger.put(5, 'offset=%d' % offset) logger.put(5, 'relative=%d' % relative) logger.put(5, 'new_offset=%d' % new_offset) if new_offset < 0: logger.put(3, 'new_offset less than 0. 
Setting to 0') new_offset = 0 self.fh.seek(new_offset) entry = self._set_at_line_start() logger.put(5, 'offset after _set_at_line_start: %d' % self.fh.tell()) logger.put(5, 'LogFile._mk_stamp_from_syslog_datestr') logger.put(5, 'datestr=%s' % datestr) logger.put(3, 'Trying to figure out the date from the string passed') timestamp = mkstamp_from_syslog_datestr(datestr, self.monthmap) if timestamp == -1: raise epylog.FormatError('Cannot grok the date format in "%s"' % datestr, logger) logger.put(5, 'timestamp=%d' % timestamp) logger.put(5, 'LogFile._accesscheck') logfile = self.filename logger.put(3, 'Running sanity checks on file "%s"' % logfile) if os.access(logfile, os.F_OK): logger.put(3, 'Path "%s" exists' % logfile) else: logger.put(3, 'Path "%s" does not exist' % logfile) raise epylog.AccessError('Log file "%s" does not exist' % logfile, logger) if os.access(logfile, os.R_OK): logger.put(3, 'File "%s" is readable' % logfile) else: logger.put(3, 'Logfile "%s" is not readable' % logfile) raise epylog.AccessError('Logfile "%s" is not readable' % logfile, logger) logger.put(5, 'LogFile._set_at_line_start') orig_offset = self.fh.tell() if orig_offset == 0: logger.put(5, 'Already at file start') return logger.put(5, 'starting the backstepping loop') entry = '' while 1: curchar = self.fh.read(1) if curchar == '\n': logger.put(5, 'Found newline at offset %d' % self.fh.tell()) break #logger.put(5, 'curchar=%s' % curchar) entry = curchar + entry offset = self.fh.tell() - 1 self.fh.seek(offset) if offset == 0: logger.put(3, 'Beginning of file reached!') break offset = offset - 1 self.fh.seek(offset) logger.put(5, 'Exited the backstepping loop') now_offset = self.fh.tell() rewound = orig_offset - now_offset logger.put(5, 'Line start found at offset "%d"' % now_offset) logger.put(5, 'rewound by %d characters' % rewound) logger.put(5, ' epylog chmod +x epylog for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir all ; \ done touch all-stamp install: all installdirs for DOC in $(DOCFILES); do \ $(INSTALL_DATA) $$DOC $(DESTDIR)$(pkgdocdir); \ done for DOCDIR in $(DOCDIRS); do\ cp -rp $$DOCDIR $(DESTDIR)$(pkgdocdir); \ done $(INSTALL_SCRIPT) epylog $(DESTDIR)$(sbindir)/epylog for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir install ; \ done uninstall: for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir uninstall ; \ done $(RM) -rf $(pkgdocdir) $(RM) $(sbindir)/epylog rmdir $(pkgvardir) clean: for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir clean ; \ done $(RM) epylog $(RM) all-stamp distclean: clean for subdir in $(SUBDIRS) ; do \ $(MAKE) -C $$subdir distclean ; \ done $(RM) config.log $(RM) config.cache $(RM) Makefile $(RM) config.status $(RM) epylog $(RM) compiledir $(RM) -rf autom4te.cache installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/configure.in0000644000175000017500000001023412527655413013670 0ustar tiagotiagodnl Process this file with autoconf to produce a configure script AC_INIT(epylog.in) PACKAGE_TARNAME=epylog PACKAGE_VERSION=1.0.7 dnl Package information. PACKAGE=$PACKAGE_TARNAME VERSION=$PACKAGE_VERSION AC_SUBST(VERSION) AC_SUBST(PACKAGE) dnl Default prefix. AC_PREFIX_DEFAULT(/usr) dnl top_builddir. top_builddir=`pwd` AC_SUBST(top_builddir) AC_SUBST(docdir) AC_SUBST(mandir) AC_SUBST(sysconfdir) dnl Checks for programs. 
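dnl The checks below locate the Python interpreter (or honor --with-python),
dnl verify that it is at least version 2.2, make sure the libxml2 Python
dnl bindings are importable, and work out where the cron script, the Python
dnl and perl modules, the temporary directory and a lynx-like browser live.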
AC_PROG_INSTALL AC_MSG_CHECKING(for --with-python) AC_ARG_WITH(python, [ --with-python=BIN Path to specific Python binary], [ AC_SUBST(PYTHON_BIN) PYTHON_BIN="$withval" AC_MSG_RESULT($PYTHON_BIN) ], AC_MSG_RESULT(no)) # check for Python executable if test -z "$PYTHON_BIN"; then AC_PATH_PROG(PYTHON_BIN, python) if test -z "$PYTHON_BIN"; then AC_MSG_ERROR(python binary not found in path) fi fi AC_SUBST(CRON_DIR) CRON_DIR=$sysconfdir/cron.daily AC_MSG_CHECKING(for --with-crondir) AC_ARG_WITH(crondir, [ --with-crondir=DIR Where to install the cron script], [ CRON_DIR="$withval" AC_MSG_RESULT($CRON_DIR) ], AC_MSG_RESULT(will use $CRON_DIR)) # find out python version AC_MSG_CHECKING(Python version) PyVERSION=`$PYTHON_BIN -c ['import sys; print sys.version[:3]'`] PyMAJVERSION=`$PYTHON_BIN -c ['import sys; print sys.version[:1]'`] PyMINVERSION=`$PYTHON_BIN -c ['import sys; print sys.version[2:3]'`] AC_MSG_RESULT($PyVERSION) # make sure Python is version 2 if test "$PyMAJVERSION" -lt "2" || \ (test "$PyMAJVERSION" -eq "2" && test "$PyMINVERSION" -lt "2"); then AC_MSG_ERROR([This version of epylog requires at least Python version 2.2. The one you have seems to be $PyVERSION. You can specify an alternate python by using (for example) --with-python=/usr/bin/python2.2]) fi # check for libxml2 AC_MSG_CHECKING(for libxml2) if ! $PYTHON_BIN -c 'import libxml2' 2>/dev/null; then AC_MSG_ERROR([libxml2 not found. This version of epylog requires libxml2-python. See http://xmlsoft.org/ or install libxml2-python rpm.]) fi AC_MSG_RESULT(yes) # find out compiled in install prefix AC_MSG_CHECKING(Python install prefix) PyEXEC_INSTALLDIR=`$PYTHON_BIN -c "import sys; print sys.exec_prefix"` AC_MSG_RESULT($PyEXEC_INSTALLDIR) # this is where the Python libraries will get installed if # --with-python-dirs is set AC_SUBST(PY_STD_LIB) PY_STD_LIB=${PyEXEC_INSTALLDIR}/lib/python${PyVERSION} # otherwise, use this (relative to $libdir) AC_SUBST(PY_PREFIX_LIB) PY_PREFIX_LIB=$libdir/python${PyVERSION} # now figure out which of these to use. On most systems with normal defaults, # these will be the same anyway. AC_SUBST(PY_MODULE_DIR) AC_MSG_CHECKING(for --with-python-dirs) AC_ARG_WITH(python-dirs, [ --with-python-dirs Put modules in python's site-packages dir], [ PY_MODULE_DIR=$withval AC_MSG_RESULT(yes) ], [ PY_MODULE_DIR=$PY_PREFIX_LIB/site-packages AC_MSG_RESULT(will use $PY_MODULE_DIR) ]) AC_SUBST(PERL_MODULE_DIR) AC_MSG_CHECKING(for --with-site-perl) AC_ARG_WITH(site-perl, [ --with-site-perl Put perl module in site_perl dir], [ PERL_MODULE_DIR=$withval AC_MSG_RESULT(yes) ], [ #eval `perl '-V:installsitelib'` PERL_MODULE_DIR=$libdir/perl5/site_perl AC_MSG_RESULT(will use $PERL_MODULE_DIR) ]) AC_SUBST(TEMP_DIR) AC_MSG_CHECKING(for --with-temp-dir) AC_ARG_WITH(temp-dir, [ --with-temp-dir Use this for temp dir (/var/tmp)], [ TEMP_DIR=$withval AC_MSG_RESULT(yes) ], [ TEMP_DIR=/var/tmp AC_MSG_RESULT(will use $TEMP_DIR) ]) AC_SUBST(LYNX_BIN) AC_MSG_CHECKING(for --with-lynx) AC_ARG_WITH(lynx, [ --with-lynx Lynx/links/w3m location (/usr/bin/lynx)], [ LYNX_BIN=$withval AC_MSG_RESULT(yes) ], [ AC_MSG_RESULT(no) for lynxpath in /usr/bin/lynx /usr/bin/links /usr/bin/w3m; do AC_MSG_CHECKING(for $lynxpath) if ! test -x $lynxpath; then AC_MSG_RESULT(no) else AC_MSG_RESULT(yes) LYNX_BIN=$lynxpath break fi done if test -z "$LYNX_BIN"; then AC_MSG_ERROR(Lynx not found. 
Please specify with --with-lynx) fi ]) AC_OUTPUT_COMMANDS(chmod +x compiledir) AC_OUTPUT(Makefile cron/Makefile etc/Makefile etc/modules.d/Makefile \ perl/Makefile man/Makefile modules/Makefile \ py/Makefile compiledir) epylog/INSTALL0000644000175000017500000000076112527655413012414 0ustar tiagotiagoRPM --- The easiest way is to get an RPM and install it. The RPM distributed on the Epylog site is known to work on Red Hat Linux 7.3, 8.0, 9, and Yellow Dog Linux 3.0. MANUAL BUILD ------------ ./configure make make install Check out the options that ./configure takes by running ./configure --help. EDIT CONFIGS ------------ Edit the files in /etc/epylog to reflect your environment. This should be it. BUGS ---- If there are any problems, please file a bug report, or contact the mailing list. epylog/mkinstalldirs0000755000175000017500000000123712527655413014170 0ustar tiagotiago#! /bin/sh # mkinstalldirs --- make directory hierarchy # Author: Noah Friedman # Created: 1993-05-16 # Public domain # $Id$ errstatus=0 for file do set fnord `echo ":$file" | sed -ne 's/^:\//#/;s/^://;s/\// /g;s/^#/\//;p'` shift pathcomp= for d do pathcomp="$pathcomp$d" case "$pathcomp" in -* ) pathcomp=./$pathcomp ;; esac if test ! -d "$pathcomp"; then echo "mkdir $pathcomp" mkdir "$pathcomp" || lasterr=$? if test ! -d "$pathcomp"; then errstatus=$lasterr fi fi pathcomp="$pathcomp/" done done exit $errstatus # mkinstalldirs ends here epylog/man/0000755000175000017500000000000012527655413012132 5ustar tiagotiagoepylog/man/epylog-modules.50000644000175000017500000000572512527655413015176 0ustar tiagotiago.TH "epylog-modules" "5" "1.0" "Konstantin Ryabitsev" "Applications/System" .SH NAME \fBepylog-modules\fR \- epylog module configuration. .SH SYNOPSIS \fBepylog\fR uses pluggable modules to perform analysis and report on syslog strings. This manpage explains the format of the module config files. .SH "modules.d" Module config files are placed in the \fBmodules.d\fR directory of the cfgdir specified in epylog.conf. Any file ending in .conf in that directory is considered a module config file. The most common location for the modules.d directory is /etc/epylog/modules.d. .SH "module.conf" The name of the config file doesn't carry much meaning; however, it MUST end in .conf in order to be recognized as a module config file. The config file for each module is separated into two parts: [module] and [conf]. .SH [module] .TP .B desc The description of the module. It will be shown in the final report. .TP .B exec This is where the "body" of the module is located. Most modules that come with the distribution will be placed in /usr/share/epylog/modules, but depending on your setup, you may place them elsewhere. .TP .B files List the logfiles requested by this module in this field. Separate multiple entries with commas. Epylog will handle rotated files, but you need to specify the mask appropriately. E.g. the most common logrotate setup will place rotated files in the same directory and add .0, .1, \.2, etc. to the end of the file. Therefore, a file entry would look like so: .br .B /var/log/filename[.#] .br If you have compression turned on, your entry will look like so: .br .B /var/log/filename[.#.gz] .br Lastly, for advanced configurations, more complex entries may be required. E.g.
if your logrotate saves rotated files in a subdirectory in /var/log, you can specify it like so: .br .B /var/log/[rotate/]filename[.#.gz] .br This will work, too: .br .B /var/log/filename[/var/rotate/filename.#.gz] .br In any case, "#" will be where the increments will go. .TP .B enabled Can be either "yes" or "no". If "no" is specified, Epylog will completely ignore this module. .TP .B internal Can be either "yes" or "no". If "yes", then the module is handled as an internal module, and if "no", then the external module API is used. See doc/modules.txt for more information about the module APIs. .TP .B outhtml Specifies whether the output produced by the module is HTML or not. Can be either "yes" or "no". .TP .B priority An unsigned int. Most commonly a number from 0 to 10. Modules with the lowest number will be considered the highest priority and will be both invoked and presented in the final report before the others. .SH "[conf]" This is where per\-module configuration directives go. Some modules have these, some don't. Look in the module config file \-\- the available values should be listed and described there. .SH "COMMENTS" Lines starting with "#" will be considered commented out. .SH "AUTHORS" .LP Konstantin Ryabitsev .SH "SEE ALSO" .LP epylog(8), Epylog(3), epylog.conf(5) epylog/man/Makefile.in0000644000175000017500000000251412527655413014201 0ustar tiagotiagotop_srcdir = @top_srcdir@ srcdir = @top_srcdir@ prefix = @prefix@ exec_prefix = @exec_prefix@ sbindir = @sbindir@ datadir = @datadir@ sysconfdir = @sysconfdir@ localstatedir = @localstatedir@ libdir = @libdir@ mandir = @mandir@ pkgdocdir = $(datadir)/doc/@PACKAGE@-@VERSION@ pkgdatadir = $(datadir)/@PACKAGE@ pkgvardir = $(localstatedir)/lib/@PACKAGE@ pkgconfdir = $(sysconfdir)/@PACKAGE@ PACKAGE = @PACKAGE@ VERSION = @VERSION@ INSTALL = @INSTALL@ INSTALL_PROGRAM = @INSTALL_PROGRAM@ INSTALL_DATA = @INSTALL_DATA@ INSTALL_SCRIPT = @INSTALL_SCRIPT@ COMPILEDIR_SCRIPT = $(top_srcdir)/compiledir CRON_DIR = @CRON_DIR@ TEMP_DIR = @TEMP_DIR@ PY_MODULE_DIR = @PY_MODULE_DIR@ PERL_MODULE_DIR = @PERL_MODULE_DIR@ MODULES_DIR = $(pkgdatadir)/modules LYNX_BIN = @LYNX_BIN@ INSTALLDIRS = $(mandir)/man8 $(mandir)/man5 MANFILES = epylog.8 epylog.conf.5 epylog-modules.5 all: install: all installdirs for MANFILE in $(MANFILES); do \ MANDIR=$(mandir)/man`echo $$MANFILE | sed -e 's/.*\.//g'`; \ $(INSTALL_DATA) $$MANFILE $(DESTDIR)$$MANDIR/$$MANFILE; \ done uninstall: for MANFILE in $(MANFILES); do \ MANDIR=$(mandir)/man`echo $$MANFILE | sed -e 's/.*\.//g'`; \ $(RM) $$MANDIR/$$MANFILE; \ done clean: distclean: clean $(RM) Makefile installdirs: for dir in $(INSTALLDIRS); do \ $(top_srcdir)/mkinstalldirs $(DESTDIR)/$$dir ; \ done epylog/man/epylog.conf.50000644000175000017500000001560112527655413014446 0ustar tiagotiago.TH "epylog.conf" "5" "1.0" "Konstantin Ryabitsev" "Applications/System" .SH NAME \fBepylog.conf\fR \- epylog configuration .SH SYNOPSIS The \fBepylog\fR config file is a simple plaintext file in win.ini style format. .SH "Location" Epylog will look in /etc/epylog/epylog.conf by default, but you can override that by passing the \-c switch on the command line. .SH "[main]" .TP .B cfgdir This is where epylog should look for other configuration information, most notably the \fBmodules.d\fR directory. See \fIepylog-modules(5)\fR for more info. .TP .B tmpdir Where to create temporary directories and put temporary files. Note that log files can grow VERY big and epylog might create several copies of them for processing purposes.
Make sure there is no danger of filling up that partition. A good place on a designated loghost is /var/tmp, since that is usually a separate partition dedicated entirely for logs. .TP .B vardir Where epylog should save its state data, namely the offsets.xml file. The sanest place for this is /var/lib/epylog. .TP .B multimatch By default, if a line is matched against a module, no other modules will be tried. This helps speed things up tremendously. However, you may have several modules that process the same lines (although this is not a very good setup). In that case you may set this to "yes". The default value is "no". .TP .B threads How many processing threads to start. 50 is a good default value, but you may set it to less or more, depending on your system. .SH "[report]" .TP .B title What should be the title of the report. For mailed reports, this is the subject of the message. For the ones published on the web, this is the title of the page (as in the HTML <title> tag). .TP .B template Which html template should be used for the final report. See the source of the default template for the format used. .TP .B include_unparsed Can be either "yes" or "no". If "no" is specified, strings that didn't match any of the modules will not be appended to the report. Not very wise! A good setting is "yes". .TP .B publishers Lists the publishers to use. The value is the name of the section where to look for the publisher configuration. E.g.: .br .B publishers = nfspub .br will look for a section called "[nfspub]" for publisher initialization. The name of the publisher has nothing to do with the method it uses for publishing. The fact that the defaults are named [file] and [mail] is only a matter of convenience. List multiple values separated by a comma. .SH "Mail Publisher" .TP .B method Method must be set to "mail" for this publisher to be considered a mail publisher. .TP .B smtpserv Can be either the hostname of an SMTP server to use, or the location of a sendmail binary. If the value starts with a "/" it will be considered a path. E.g. valid entries: .br .B smtpserv = mail.example.com .br .B smtpserv = /usr/sbin/sendmail -t .TP .B mailto The list of email addresses where to mail the report. Separate multiple entries by a comma. If omitted, "root@localhost" will be used. .TP .B format Can be one of the following: \fBhtml\fR, \fBplain\fR, or \fBboth\fR. If you use a mail client that doesn't support html mail, you should use "plain" or "both", though you will miss out on the visual cues that epylog uses to point out important events. .TP .B lynx This is only useful if you use a format other than "html". Epylog will use a lynx-compliant tool to transform HTML into plain text. The following browsers are known to work: lynx, elinks, w3m. .TP .B include_rawlogs Whether to include the gzipped raw logs with the message. If set to "yes", it will attach the file with all processed logs to the message. If you use a file publisher in addition to the mail publisher, this may be a tad too paranoid. .TP .B rawlogs_limit If the size of rawlogs.gz is more than this setting (in kilobytes), then raw logs will not be attached. Useful if you have a 50 MB log and check your mail over a slow uplink. .TP .B gpg_encrypt Logs routinely contain sensitive information, so you may want to encrypt the email report to ensure that nobody can read it other than designated administrators. Set to "yes" to enable gpg-encryption of the mail report. You will need to install mygpgme (installed by default on all yum-managed systems).
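.LP
For illustration, a minimal mail publisher section combining the directives described so far might look like the following (the section name and all values are examples only, not shipped defaults; the optional gpg_* directives are described below):
.br
.B [mail]
.br
.B method = mail
.br
.B smtpserv = /usr/sbin/sendmail -t
.br
.B mailto = root@localhost
.br
.B format = both
.br
.B lynx = /usr/bin/lynx
.br
.B include_rawlogs = no
.br
.B rawlogs_limit = 512
.br
.B gpg_encrypt = no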
.TP .B gpg_keyringdir If you don't want to use the default keyring (usually /root/.gnupg), you can set up a separate keyring directory for epylog's use. E.g.: .br > mkdir -m 0700 /etc/epylog/gpg .TP .B gpg_recipients List of PGP key id's to use when encrypting the report. The keys must be in the pubring specified in gpg_keyringdir. If this option is omitted, epylog will encrypt to all keys found in the pubring. To add a public key to a keyring, you can use the following command. .br > gpg [--homedir=/etc/epylog/gpg] --import pubkey.gpg .br You can generate the pubkey.gpg file by running "gpg --export KEYID" on your workstation, or you can use "gpg --search" to import the public keys from the keyserver. .TP .B gpg_signers To use the signing option, you will first need to generate a private key: .br > gpg [--homedir=/etc/epylog/gpg] --gen-key .br Create a \fBsign-only RSA key\fR and leave the passphrase empty. You can then use "gpg --export" to export the key you have generated and import it on the workstation where you read mail. .br If gpg_signers is not set, the report will not be signed. .SH "File Publisher" .TP .B method Method must be set to "file" for this config to work as a file publisher. .TP .B path Where to place the directories with reports. A sensible location would be in /var/www/html/epylog. Note that the reports may contain sensitive information, so make sure you place a .htaccess in that directory and require a password, or limit by host. .TP .B dirmask, filemask These are the masks to be used for the created directories and files. For format values look at strftime documentation here: http://www.python.org/doc/current/lib/module-time.html .TP .B save_rawlogs Whether to save the raw logs in a file in the same directory as the report. The default is off, since you can easily look in the original log sources. .TP .B expire_in A digit specifying the number of days after which the old directories should be removed. Default is 7. .TP .B notify Optionally send notifications to these email addresses when new reports become available. Comment out if no notification is desired. This is definitely redundant if you also use the mail publisher. .TP .B smtpserv Use this smtp server when sending notifications. Can be either a hostname or a path to sendmail. Defaults to "/usr/sbin/sendmail -t". .TP .B pubroot When generating a notification message, use this as publication root to make a link. E.g.: .br .B pubroot = http://www.example.com/epylog .br will make a link: http://www.example.com/epylog/dirname/filename.html .SH "COMMENTS" Lines starting with "#" will be considered commented out. .SH "AUTHORS" .LP Konstantin Ryabitsev .SH "SEE ALSO" .LP epylog(3), epylog(8), epylog-modules(5) epylog/man/epylog.80000644000175000017500000000776412527655413013540 0ustar tiagotiago.TH "epylog" "8" "1.0" "Konstantin Ryabitsev" "Applications/System" .SH NAME \fBepylog\fR \- Syslog new log notifier and parser. .SH SYNOPSIS \fBepylog\fR [\-c epylog.conf] [\-d LOGLEVEL] [\-\-last PERIOD] [\-\-store\-offsets] [\-\-quiet] [\-\-cron] .SH DESCRIPTION Epylog is a new log notifier and parser which runs periodically out of cron, looks at your logs, processes the entries in order to present them in a more comprehensive format, and then provides you with the output. It is written specifically with large network clusters in mind where a lot of machines (around 50 and upwards) log to the same loghost using syslog or syslog\-ng. 
Alternatively, Epylog can be invoked from the command line to provide a log report for a specified time period. In this case it relies on syslog timestamps to find the offsets, as opposed to the end-of-log offsets stored during the last run, though this behavior is not as reliable and is easily thwarted by skewed clocks. .SH OPTIONS .TP .B \-c config.file Provide an alternative config file to Epylog. By default, it will look in /etc/epylog/epylog.conf. .TP .B \-d LOGLEVEL Logging level. The default is 1. 0 will produce no output except for critical errors (useful for cron runs). 2 and above are debugging levels. 5 is the most verbose. .TP .B \-\-last PERIOD Will make a report on events that occurred in the last PERIOD. PERIOD can be either "hour", "day", "week", "month", or more granular: "1h", "2h", "3d", "2w", etc. When \-\-last is specified, epylog will ignore the saved offsets and locate the entries by timestamps. \fICAUTION\fR: this process is not to be trusted, since the timestamps are not checked for any validity when they arrive at the loghost. One reporting machine with a skewed clock may confuse Epylog enough to miss a lot of valid entries. .TP .B \-\-store\-offsets When specified, will store the offset of the last log entry processed in offsets.xml. During cron runs, epylog relies on the offset information to find out what new entries to process. This is more trustworthy than relying on timestamps. The default behavior is not to store the offsets, as this allows running epylog both from cron and manually without the two interfering with each other. The location of offsets.xml is specified in epylog.conf. See \fBepylog.conf(5)\fR for more details. .TP .B \-\-quiet In every way identical to \-d 0. .TP .B \-\-cron This is essentially \-\-quiet \-\-store\-offsets, plus a lockfile will be created and consulted, preventing more than one instance of epylog from running. You can still run epylog manually \-\- the lockfile is only checked when running in \-\-cron mode. .SH "FEATURES" .RS The core of epylog is written in python. It handles things like timestamp lookups, unwrapping of "last message repeated" lines, handling of rotated files, preparing and publishing the reports, etc. The modules are pluggable and can be either "internal", written in python, or external. External modules can be written in any language, but at the price of some convenience. For more info see \fBepylog-modules(5)\fR. .SH "INITIAL RUN" .RS Depending on the size of your logs, you might want to initialize your offsets before letting epylog run from cron. When the offsets.xml file is missing, epylog will by default process the entire log, and depending on your configuration, that can be a lot of entries. A good way to initialize epylog is to run: .LP .B epylog \-\-last day \-\-store\-offsets .SH "FILES" .LP .I /etc/epylog/epylog.conf .br .I /usr/sbin/epylog .br .I /etc/cron.daily/epylog.cron .br .I /etc/epylog/* .br .I /var/lib/epylog/* .br .I /usr/share/epylog/modules/* .SH "EXAMPLES" .LP The most useful way to run epylog from the command line is with \-\-last. E.g.: .LP .B epylog \-\-last day .br .B epylog \-\-last 2w .LP When running from cron, you want to store the offsets and not rely on timestamps. There is a mode that allows you to do this: .LP .B epylog \-\-cron .SH "AUTHORS" .LP Konstantin Ryabitsev .SH "SEE ALSO" .LP epylog.conf(5) epylog-modules(5)