pax_global_header00006660000000000000000000000064135356624220014522gustar00rootroot0000000000000052 comment=93c4290d7bf3c57da48e8a89d0acc3111bcffdd5 fever-1.0.8/000077500000000000000000000000001353566242200126375ustar00rootroot00000000000000fever-1.0.8/.circleci/000077500000000000000000000000001353566242200144725ustar00rootroot00000000000000fever-1.0.8/.circleci/config.yml000066400000000000000000000007351353566242200164670ustar00rootroot00000000000000# Golang CircleCI 2.0 configuration file # # Check https://circleci.com/docs/2.0/language-go/ for more details version: 2 jobs: build: docker: - image: circleci/golang:1.10-stretch working_directory: /go/src/github.com/DCSO/fever steps: - checkout - run: name: Install test dependencies command: 'sudo apt-get update && sudo apt-get install redis-server -y' - run: go get -v -t -d ./... - run: go test -v ./... fever-1.0.8/.gitignore000066400000000000000000000000351353566242200146250ustar00rootroot00000000000000*.eve.json *.eve.json.gz .vs fever-1.0.8/LICENSE000066400000000000000000000030421353566242200136430ustar00rootroot00000000000000Copyright (c) 2017, 2018, 2019, DCSO Deutsche Cyber-Sicherheitsorganisation GmbH All rights reserved. Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met: * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer. * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution. * Neither the name of the DCSO Deutsche Cyber-Sicherheitsorganisation GmbH nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission. 
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. fever-1.0.8/README.md000066400000000000000000000243751353566242200141310ustar00rootroot00000000000000# 🔥 FEVER [![CircleCI](https://circleci.com/gh/DCSO/fever.svg?style=svg)](https://circleci.com/gh/DCSO/fever) The Fast, Extensible, Versatile Event Router (FEVER) is a tool for fast processing of events from Suricata's JSON EVE output. What is meant by 'processing' is defined by a number of modular components, for example facilitating fast ingestion into a database. Other processors implement collection, aggregation and forwarding of various metadata (e.g. aggregated and raw flows, passive DNS data, etc.) as well as performance metrics. It is meant to be used in front of (or as a replacement for) general-purpose log processors like Logstash to increase event throughput as observed on sensors that see a lot of traffic. ## Building Like any good Go program: ``` $ go get -t ./... $ go build ./... $ go install -v ./... ... $ fever run -h ``` ## Usage ``` $ ./fever run -h The 'run' command starts the FEVER service, consuming events from the input and executing all processing components. 
Usage: fever run [flags] Flags: --active-rdns enable active rDNS enrichment for src/dst IPs --active-rdns-cache-expiry duration cache expiry interval for rDNS lookups (default 2m0s) --active-rdns-private-only only do active rDNS enrichment for RFC1918 IPs --bloom-alert-prefix string String prefix for Bloom filter alerts (default "BLF") --bloom-blacklist-iocs strings Blacklisted strings in Bloom filter (will cause filter to be rejected) (default [/,/index.htm,/index.html]) -b, --bloom-file string Bloom filter for external indicator screening -z, --bloom-zipped use gzipped Bloom filter file -c, --chunksize uint chunk size for batched event handling (e.g. inserts) (default 50000) --context-enable collect and forward flow context for alerted flows --context-submission-exchange string Exchange to which flow context events will be submitted (default "context") --context-submission-url string URL to which flow context will be submitted (default "amqp://guest:guest@localhost:5672/") -d, --db-database string database DB (default "events") --db-enable write events to database -s, --db-host string database host (default "localhost:5432") --db-maxtablesize uint Maximum allowed cumulative table size in GB (default 500) -m, --db-mongo use MongoDB -p, --db-password string database password (default "sensor") --db-rotate duration time interval for database table rotations (default 1h0m0s) -u, --db-user string database user (default "sensor") --dummy log locally instead of sending home --flowextract-bloom-selector string IP address Bloom filter to select flows to extract --flowextract-enable extract and forward flow metadata --flowextract-submission-exchange string Exchange to which raw flow events will be submitted (default "flows") --flowextract-submission-url string URL to which raw flow events will be submitted (default "amqp://guest:guest@localhost:5672/") -n, --flowreport-interval duration time interval for report submissions --flowreport-nocompress send uncompressed flow 
reports (default is gzip) --flowreport-submission-exchange string Exchange to which flow reports will be submitted (default "aggregations") --flowreport-submission-url string URL to which flow reports will be submitted (default "amqp://guest:guest@localhost:5672/") --flushcount uint maximum number of events in one batch (e.g. for flow extraction) (default 100000) -f, --flushtime duration time interval for event aggregation (default 1m0s) -T, --fwd-all-types forward all event types -t, --fwd-event-types strings event types to forward to socket (default [alert,stats]) -h, --help help for run -r, --in-redis string Redis input server (assumes "suricata" list key, no pwd) --in-redis-nopipe do not use Redis pipelining -i, --in-socket string filename of input socket (accepts EVE JSON) (default "/tmp/suri.sock") --ip-alert-prefix string String prefix for IP blacklist alerts (default "IP-BLACKLIST") --ip-blacklist string List with IP ranges to alert on --logfile string Path to log file --logjson Output logs in JSON format --metrics-enable submit performance metrics to central sink --metrics-submission-exchange string Exchange to which metrics will be submitted (default "metrics") --metrics-submission-url string URL to which metrics will be submitted (default "amqp://guest:guest@localhost:5672/") -o, --out-socket string path to output socket (to forwarder), empty string disables forwarding (default "/tmp/suri-forward.sock") --pdns-enable collect and forward aggregated passive DNS data --pdns-submission-exchange string Exchange to which passive DNS events will be submitted (default "pdns") --pdns-submission-url string URL to which passive DNS events will be submitted (default "amqp://guest:guest@localhost:5672/") --profile string enable runtime profiling to given file --reconnect-retries uint number of retries connecting to socket or sink, 0 = no retry limit --toolname string set toolname (default "fever") -v, --verbose enable verbose logging (debug log level) Global Flags: 
--config string config file (default is $HOME/.fever.yaml) ``` It is also possible to use a config file in YAML format ([Example](fever.yaml)). Configuration is cascading: first settings are loaded from the config file and can then be overridden by command line parameters. ## Running tests The test suite requires a Redis executable in the current path. Most simply, this requirement can be satisfied by just installing Redis. For instance, via `apt`: ``` $ apt install redis-server ``` Then the test suite can be run via Go's generic testing framework: ``` $ go test -v -race -cover ./... ... ``` ## Suricata settings The tool is designed to consume JSON events from a socket, by default `/tmp/suri.sock`. This can be enabled using the following setting in `suricata.yaml`: ```yaml ... # Extensible Event Format (nicknamed EVE) event log in JSON format - eve-log: enabled: yes filetype: unix_stream filename: /tmp/suri.sock ... ``` All JSON is also passed through to another socket, which allows to plug it between Suricata and another log consumer, e.g. Logstash and friends. Another way to consume events is via Redis. Use the `-r` parameters to specify a Redis host, the key `suricata` will be queried as a list to BRPOP events from. ## Important settings - Database connection: use the `-db-*` parameters to specify a database connection. PostgreSQL 9.5 or later is required. Use `-m` to use the parameters as MongoDB connection parameters instead. - Chunk size: determines the number of events that is imported as a whole at the same time. Larger values may be faster and lead to better throughput, but will use more RAM and also lose more events in case a bulk import (=transaction) fails. Smaller values will increase the overhead on the database. - Profiling: optional output of a pprof file to be used with `go tool pprof`. - Table rotation: tables are created as unlogged tables without indexes for maximal write performance. 
To keep table sizes in check, tables are timestamped and rotated in a time interval chosen by the user, e.g. 1h. Index creation is deferred until a table is rotated away and no longer written to, and also happens in the background. Indexing jobs are queued so if indexing takes longer than one rotation period, data should not be lost. - Event forwarding: Events processed by FEVER can be forwarded to another socket to be processed by a downstream tool, e.g. Logstash. By default, only `alert` and `stats` event types are forwarded, but the set of forwarded types can be extended using `-t ` for each additional type to be forwarded. As a catch-all (and probably the best option for sensors still running a full ELK stack) the option `-T` will forward everything. - Bloom filters can be reloaded by sending a `SIGUSR1` to the main process. ## Development test runs with local data Create local socket to consume forwarded events. You can also use [pv](http://www.ivarch.com/programs/pv.shtml) to monitor if data is flowing and how much (you may need to install the necessary tools using `apt install pv netcat-openbsd` before): ```bash $ nc -klU /tmp/suri-forward.sock | pv > /dev/null ``` Instead of simply sending it to `/dev/null`, one can of course filter the output using `jq` etc. to visually confirm that certain output is forwarded. Start the service: ```bash $ ./fever run -v -n 0 -l '' & ``` The `-n 0` option disables submission of flow metadata. Optionally, `--dummy`/`--nodb` can be used to disable database inserts and only test input parsing and metadata aggregation. Finally, push test data into the input socket: ```bash $ head -n 100000 huge.eve.json | socat /tmp/suri.sock STDIO ``` which would feed the first 100k events from `huge.eve.json` into the socket. The `socat` tool can be installed as usual via `apt install socat`. To feed EVE data into FEVER using Redis (started with `-r`), you can simply LPUSH the JSON events into a list referenced by the key `suricata`. 
Use the Lua script `scripts/makelpush` to convert raw EVE lines into Redis statements: ``` $ head -n 100000 huge.eve.json | scripts/makelpush | redis-cli > /dev/null ``` ## Author/Contact Sascha Steinbiss ## License BSD-3-clause fever-1.0.8/cmd/000077500000000000000000000000001353566242200134025ustar00rootroot00000000000000fever-1.0.8/cmd/fever/000077500000000000000000000000001353566242200145115ustar00rootroot00000000000000fever-1.0.8/cmd/fever/cmds/000077500000000000000000000000001353566242200154375ustar00rootroot00000000000000fever-1.0.8/cmd/fever/cmds/makeman.go000066400000000000000000000013761353566242200174060ustar00rootroot00000000000000package cmd import ( log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/cobra/doc" ) // mmanCmd represents the makeman command var mmanCmd = &cobra.Command{ Use: "makeman [options]", Short: "Create man pages", Run: func(cmd *cobra.Command, args []string) { targetDir, err := cmd.Flags().GetString("dir") if err != nil { log.Fatal(err) } header := &doc.GenManHeader{} err = doc.GenManTree(rootCmd, header, targetDir) if err != nil { log.Fatal(err) } for _, v := range rootCmd.Commands() { err = doc.GenManTree(v, header, targetDir) if err != nil { log.Fatal(err) } } }, } func init() { rootCmd.AddCommand(mmanCmd) mmanCmd.Flags().StringP("dir", "d", ".", "target directory for man pages") } fever-1.0.8/cmd/fever/cmds/root.go000066400000000000000000000033701353566242200167540ustar00rootroot00000000000000package cmd // DCSO FEVER // Copyright (c) 2018, DCSO GmbH import ( "fmt" "os" homedir "github.com/mitchellh/go-homedir" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" ) var cfgFile string // rootCmd represents the base command when called without any subcommands var rootCmd = &cobra.Command{ Use: "fever", Short: "fast, extensible and flexible event router", Long: `FEVER is a fast execution engine for processing, aggregation and reporting components that act on Suricata's EVE 
output.`, } // Execute adds all child commands to the root command and sets flags appropriately. // This is called by main.main(). It only needs to happen once to the rootCmd. func Execute() { if err := rootCmd.Execute(); err != nil { fmt.Println(err) os.Exit(1) } } func init() { cobra.OnInitialize(initConfig) // Here you will define your flags and configuration settings. // Cobra supports persistent flags, which, if defined here, // will be global for your application. rootCmd.PersistentFlags().StringVar(&cfgFile, "config", "", "config file (default is $HOME/.fever.yaml)") } // initConfig reads in config file and ENV variables if set. func initConfig() { if cfgFile != "" { // Use config file from the flag. viper.SetConfigFile(cfgFile) } else { // Find home directory. home, err := homedir.Dir() if err != nil { fmt.Println(err) os.Exit(1) } // Search config in home directory with name ".fever" (without extension). viper.AddConfigPath(home) viper.SetConfigName(".fever") } viper.AutomaticEnv() // read in environment variables that match // If a config file is found, read it in. 
if err := viper.ReadInConfig(); err == nil { log.Infof("Using config file: %s", viper.ConfigFileUsed()) } } fever-1.0.8/cmd/fever/cmds/run.go000066400000000000000000000571521353566242200166040ustar00rootroot00000000000000package cmd // DCSO FEVER // Copyright (c) 2017, 2018, 2019, DCSO GmbH import ( "io" "os" "os/signal" "runtime/pprof" "syscall" "time" "github.com/DCSO/fever/db" "github.com/DCSO/fever/input" "github.com/DCSO/fever/processing" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqp" log "github.com/sirupsen/logrus" "github.com/spf13/cobra" "github.com/spf13/viper" ) var dispatcher *processing.HandlerDispatcher var forward bool const defaultQueueSize = 50000 func mainfunc(cmd *cobra.Command, args []string) { var s db.Slurper var err error var submitter util.StatsSubmitter var statssubmitter util.StatsSubmitter var pse *util.PerformanceStatsEncoder eventChan := make(chan types.Entry, defaultQueueSize) util.ToolName = viper.GetString("toolname") logfilename := viper.GetString("logging.file") if len(logfilename) > 0 { log.Println("Switching to log file", logfilename) file, err := os.OpenFile(logfilename, os.O_APPEND|os.O_CREATE|os.O_WRONLY, 0640) if err != nil { log.Fatal(err) } defer file.Close() log.SetFormatter(&log.TextFormatter{ DisableColors: true, FullTimestamp: true, }) log.SetOutput(file) } logjson := viper.GetBool("logging.json") if logjson { log.SetFormatter(&log.JSONFormatter{}) } verbose := viper.GetBool("verbose") if verbose { log.Info("verbose log output enabled") log.SetLevel(log.DebugLevel) } dummyMode := viper.GetBool("dummy") enableMetrics := viper.GetBool("metrics.enable") if err != nil { log.Fatal(err) } if enableMetrics { if dummyMode { statssubmitter, err = util.MakeDummySubmitter() if err != nil { log.Fatal(err) } } else { metricsSubmissionURL := viper.GetString("metrics.submission-url") metricsSubmissionExchange := 
viper.GetString("metrics.submission-exchange") statssubmitter, err = util.MakeAMQPSubmitterWithReconnector(metricsSubmissionURL, metricsSubmissionExchange, verbose, func(amqpURI string) (wabbit.Conn, error) { conn, err := amqp.Dial(amqpURI) if err != nil { return nil, err } return conn, err }) if err != nil { log.Fatal(err) } } // create InfluxDB line protocol encoder/submitter pse = util.MakePerformanceStatsEncoder(statssubmitter, 10*time.Second, dummyMode) } // create dispatcher dispatcher = processing.MakeHandlerDispatcher(eventChan) if pse != nil { dispatcher.SubmitStats(pse) } dispatcher.Run() defer func() { c := make(chan bool) dispatcher.Stop(c) <-c }() // create event type counter if enableMetrics { evp, err := processing.MakeEventProfiler(10*time.Second, statssubmitter) if err != nil { log.Fatal(err) } dispatcher.RegisterHandler(evp) evp.Run() defer func() { c := make(chan bool) evp.Stop(c) <-c }() } // Configure forwarding outputSocket := viper.GetString("output.socket") forward = (outputSocket != "") eventTypes := viper.GetStringSlice("forward.types") allTypes := viper.GetBool("forward.all") util.PrepareEventFilter(eventTypes, allTypes) // Optional profiling profileFile := viper.GetString("profile") if profileFile != "" { var f io.Writer f, err = os.Create(profileFile) if err != nil { log.Fatal(err) } pprof.StartCPUProfile(f) defer pprof.StopCPUProfile() } // Set up database writing components DBenabled := viper.GetBool("database.enable") chunkSize := viper.GetInt("chunksize") if DBenabled { dbUseMongo := viper.GetBool("database.mongo") dbHost := viper.GetString("database.host") dbDatabase := viper.GetString("database.database") dbUser := viper.GetString("database.user") dbPassword := viper.GetString("database.password") maxTableSize := viper.GetInt64("database.maxtablesize") if dbUseMongo { s = db.MakeMongoSlurper(dbHost, dbDatabase, dbUser, dbPassword, int(chunkSize), int64(maxTableSize)) } else { rotationInterval := 
viper.GetDuration("database.rotate") s = db.MakePostgresSlurper(dbHost, dbDatabase, dbUser, dbPassword, rotationInterval, int64(maxTableSize), int(chunkSize)) } } else { if verbose { log.Println("database not in use") } s = &db.DummySlurper{} } s.Run(eventChan) var forwardHandler processing.Handler reconnectTimes := viper.GetInt("reconnect-retries") // start forwarding if forward { forwardHandler = processing.MakeForwardHandler(int(reconnectTimes), outputSocket) if pse != nil { forwardHandler.(*processing.ForwardHandler).SubmitStats(pse) } rdns := viper.GetBool("active.rdns") if rdns { expiryPeriod := viper.GetDuration("active.rdns-cache-expiry") forwardHandler.(*processing.ForwardHandler).EnableRDNS(expiryPeriod) privateOnly := viper.GetBool("active.rdns-private-only") if privateOnly { forwardHandler.(*processing.ForwardHandler).RDNSHandler.EnableOnlyPrivateIPRanges() } } forwardHandler.(*processing.ForwardHandler).Run() defer func() { c := make(chan bool) forwardHandler.(*processing.ForwardHandler).Stop(c) <-c }() } else { // in this case we use a void handler that does nothing forwardHandler = processing.MakeVoidHandler() } dispatcher.RegisterHandler(forwardHandler) // Bloom filter setup bloomFilePath := viper.GetString("bloom.file") bloomAlertPrefix := viper.GetString("bloom.alert-prefix") bloomCompressed := viper.GetBool("bloom.zipped") bloomBlacklist := viper.GetStringSlice("bloom.blacklist-iocs") var bloomHandler *processing.BloomHandler if bloomFilePath != "" { bloomHandler, err = processing.MakeBloomHandlerFromFile(bloomFilePath, bloomCompressed, eventChan, forwardHandler, bloomAlertPrefix, bloomBlacklist) if err != nil { log.Fatal(err) } dispatcher.RegisterHandler(bloomHandler) } ipFilePath := viper.GetString("ip.blacklist") ipAlertPrefix := viper.GetString("ip.alert-prefix") var ipHandler *processing.IPHandler if ipFilePath != "" { ipHandler, err = processing.MakeIPHandlerFromFile(ipFilePath, eventChan, forwardHandler, ipAlertPrefix) if err != nil { 
log.Fatal(err) } dispatcher.RegisterHandler(ipHandler) } // flow aggregation setup flushPeriod := viper.GetDuration("flushtime") log.Debugf("flushtime set to %v", flushPeriod) fa := processing.MakeFlowAggregator(flushPeriod, eventChan) if pse != nil { fa.SubmitStats(pse) } dispatcher.RegisterHandler(fa) fa.Run() defer func() { c := make(chan bool) fa.Stop(c) <-c }() // DNS aggregation setup da := processing.MakeDNSAggregator(flushPeriod, eventChan) if pse != nil { da.SubmitStats(pse) } dispatcher.RegisterHandler(da) da.Run() defer func() { c := make(chan bool) da.Stop(c) <-c }() // context collector setup enableContext := viper.GetBool("context.enable") if enableContext { var csubmitter util.StatsSubmitter if dummyMode { csubmitter, err = util.MakeDummySubmitter() if err != nil { log.Fatal(err) } } else { cSubmissionURL := viper.GetString("context.submission-url") cSubmissionExchange := viper.GetString("context.submission-exchange") csubmitter, err = util.MakeAMQPSubmitter(cSubmissionURL, cSubmissionExchange, verbose) if err != nil { log.Fatal(err) } csubmitter.UseCompression() defer csubmitter.Finish() } cshp := processing.ContextShipperAMQP{} shipChan, err := cshp.Start(csubmitter) if err != nil { log.Fatal(err) } processing.GlobalContextCollector = processing.MakeContextCollector( func(entries processing.Context, logger *log.Entry) error { shipChan <- entries return nil }, viper.GetDuration("context.cache-timeout"), ) dispatcher.RegisterHandler(processing.GlobalContextCollector) if pse != nil { processing.GlobalContextCollector.SubmitStats(pse) } processing.GlobalContextCollector.Run() defer func() { c := make(chan bool) processing.GlobalContextCollector.Stop(c) <-c }() } // passive DNS setup enablePDNS := viper.GetBool("pdns.enable") if enablePDNS { var pdcsubmitter util.StatsSubmitter if dummyMode { pdcsubmitter, err = util.MakeDummySubmitter() if err != nil { log.Fatal(err) } } else { pdnsSubmissionURL := viper.GetString("pdns.submission-url") 
pdnsSubmissionExchange := viper.GetString("pdns.submission-exchange") pdcsubmitter, err = util.MakeAMQPSubmitter(pdnsSubmissionURL, pdnsSubmissionExchange, verbose) if err != nil { log.Fatal(err) } pdcsubmitter.UseCompression() defer pdcsubmitter.Finish() } pdc, err := processing.MakePDNSCollector(flushPeriod, pdcsubmitter) if err != nil { log.Fatal(err) } dispatcher.RegisterHandler(pdc) pdc.Run() defer func() { c := make(chan bool) pdc.Stop(c) <-c }() } else { log.Info("passive DNS collection disabled") } noCompressMsg := viper.GetBool("flowreport.nocompress") // Aggregate stats reporting setup unicornSleep := viper.GetDuration("flowreport.interval") if unicornSleep > 0 { var submitter util.StatsSubmitter if dummyMode { submitter, err = util.MakeDummySubmitter() if err != nil { log.Fatal(err) } } else { unicornSubmissionURL := viper.GetString("flowreport.submission-url") unicornSubmissionExchange := viper.GetString("flowreport.submission-exchange") submitter, err = util.MakeAMQPSubmitter(unicornSubmissionURL, unicornSubmissionExchange, verbose) if err != nil { log.Fatal(err) } defer submitter.Finish() } if !noCompressMsg { submitter.UseCompression() log.WithFields(log.Fields{ "domain": "aggregate", "state": "enabled", }).Info("compression of flow stats") } else { log.WithFields(log.Fields{ "domain": "aggregate", "state": "disabled", }).Info("compression of flow stats") } ua := processing.MakeUnicornAggregator(submitter, unicornSleep, dummyMode) dispatcher.RegisterHandler(ua) ua.Run() defer func() { c := make(chan bool) ua.Stop(c) <-c }() } else { log.WithFields(log.Fields{ "domain": "aggregate", }).Info("flow stats reporting disabled") } // Flow extraction extractFlows := viper.GetBool("flowextract.enable") if extractFlows { var submitter util.StatsSubmitter if dummyMode { submitter, err = util.MakeDummySubmitter() if err != nil { log.Fatal(err) } } else { flowSubmissionURL := viper.GetString("flowextract.submission-url") flowSubmissionExchange := 
viper.GetString("flowextract.submission-exchange") submitter, err = util.MakeAMQPSubmitter(flowSubmissionURL, flowSubmissionExchange, verbose) if err != nil { log.Fatal(err) } defer submitter.Finish() } if noCompressMsg { submitter.UseCompression() log.WithFields(log.Fields{ "domain": "flow-extraction", "state": "enabled", }).Info("compression of flows") } else { log.WithFields(log.Fields{ "domain": "flow-extraction", "state": "disabled", }).Info("no compression of flows") } flushCount := viper.GetInt("flushcount") flowBloomFilePath := viper.GetString("flowextract-bloom-selector") ua, err := processing.MakeFlowExtractor(flushPeriod, int(flushCount), flowBloomFilePath, submitter) if err != nil { log.Fatal(err) } dispatcher.RegisterHandler(ua) ua.Run() defer func() { c := make(chan bool) ua.Stop(c) <-c }() } else { log.WithFields(log.Fields{ "domain": "flow-extraction", }).Info("Flow extraction disabled") } c := make(chan os.Signal, 1) signal.Notify(c, os.Interrupt, syscall.SIGTERM, syscall.SIGINT, syscall.SIGUSR1) go func() { for sig := range c { if sig == syscall.SIGTERM || sig == syscall.SIGINT { pprof.StopCPUProfile() if submitter != nil { submitter.Finish() } if s != nil { s.Finish() } log.WithFields(log.Fields{ "domain": "main", }).Println("received SIGTERM, terminating") inputSocket := viper.GetString("input.socket") _, myerr := os.Stat(inputSocket) if myerr == nil { os.Remove(inputSocket) } os.Exit(1) } else if sig == syscall.SIGUSR1 { if bloomHandler != nil { err := bloomHandler.Reload() if err != nil { log.Warnf("reloading of Bloom filter failed: %s", err.Error()) } else { log.Info("reloading of Bloom complete") } } } } }() // create input inputChan := make(chan types.Entry) var sinput input.Input inputRedis := viper.GetString("input.redis.server") noUseRedisPipeline := viper.GetBool("input.redis.nopipe") if len(inputRedis) > 0 { sinput, err = input.MakeRedisInput(inputRedis, inputChan, int(chunkSize)) sinput.(*input.RedisInput).UsePipelining = 
!noUseRedisPipeline sinput.(*input.RedisInput).SubmitStats(pse) } else { inputSocket := viper.GetString("input.socket") sinput, err = input.MakeSocketInput(inputSocket, inputChan) } if err != nil { log.Fatal(err) } log.WithFields(log.Fields{ "input": sinput.GetName(), }).Info("selected input driver") sinput.SetVerbose(verbose) sinput.Run() defer func() { c := make(chan bool) sinput.Stop(c) <-c }() for v := range inputChan { dispatcher.Dispatch(&v) } } var runCmd = &cobra.Command{ Use: "run", Short: "start FEVER service", Long: `The 'run' command starts the FEVER service, consuming events from the input and executing all processing components.`, Run: mainfunc, } func init() { rootCmd.AddCommand(runCmd) // Input options runCmd.PersistentFlags().StringP("in-socket", "i", "/tmp/suri.sock", "filename of input socket (accepts EVE JSON)") viper.BindPFlag("input.socket", runCmd.PersistentFlags().Lookup("in-socket")) runCmd.PersistentFlags().StringP("in-redis", "r", "", "Redis input server (assumes \"suricata\" list key, no pwd)") viper.BindPFlag("input.redis.server", runCmd.PersistentFlags().Lookup("in-redis")) runCmd.PersistentFlags().BoolP("in-redis-nopipe", "", false, "do not use Redis pipelining") viper.BindPFlag("input.redis.nopipe", runCmd.PersistentFlags().Lookup("in-redis-nopipe")) // Output options runCmd.PersistentFlags().StringP("out-socket", "o", "/tmp/suri-forward.sock", "path to output socket (to forwarder), empty string disables forwarding") viper.BindPFlag("output.socket", runCmd.PersistentFlags().Lookup("out-socket")) // Forwarding options runCmd.PersistentFlags().StringSliceP("fwd-event-types", "t", []string{"alert", "stats"}, "event types to forward to socket") viper.BindPFlag("forward.types", runCmd.PersistentFlags().Lookup("fwd-event-types")) runCmd.PersistentFlags().BoolP("fwd-all-types", "T", false, "forward all event types") viper.BindPFlag("forward.all", runCmd.PersistentFlags().Lookup("fwd-all-types")) // Misc options 
runCmd.PersistentFlags().StringP("profile", "", "", "enable runtime profiling to given file") viper.BindPFlag("profile", runCmd.PersistentFlags().Lookup("profile")) runCmd.PersistentFlags().BoolP("verbose", "v", false, "enable verbose logging (debug log level)") viper.BindPFlag("verbose", runCmd.PersistentFlags().Lookup("verbose")) runCmd.PersistentFlags().UintP("chunksize", "c", 50000, "chunk size for batched event handling (e.g. inserts)") viper.BindPFlag("chunksize", runCmd.PersistentFlags().Lookup("chunksize")) runCmd.PersistentFlags().BoolP("dummy", "", false, "log locally instead of sending home") viper.BindPFlag("dummy", runCmd.PersistentFlags().Lookup("dummy")) runCmd.PersistentFlags().UintP("reconnect-retries", "", 0, "number of retries connecting to socket or sink, 0 = no retry limit") viper.BindPFlag("reconnect-retries", runCmd.PersistentFlags().Lookup("reconnect-retries")) runCmd.PersistentFlags().DurationP("flushtime", "f", 1*time.Minute, "time interval for event aggregation") viper.BindPFlag("flushtime", runCmd.PersistentFlags().Lookup("flushtime")) runCmd.PersistentFlags().UintP("flushcount", "", 100000, "maximum number of events in one batch (e.g. 
for flow extraction)") viper.BindPFlag("flushcount", runCmd.PersistentFlags().Lookup("flushcount")) runCmd.PersistentFlags().StringP("toolname", "", "fever", "set toolname") viper.BindPFlag("toolname", runCmd.PersistentFlags().Lookup("toolname")) // Database options runCmd.PersistentFlags().BoolP("db-enable", "", false, "write events to database") viper.BindPFlag("database.enable", runCmd.PersistentFlags().Lookup("db-enable")) runCmd.PersistentFlags().StringP("db-host", "s", "localhost:5432", "database host") viper.BindPFlag("database.host", runCmd.PersistentFlags().Lookup("db-host")) runCmd.PersistentFlags().StringP("db-user", "u", "sensor", "database user") viper.BindPFlag("database.user", runCmd.PersistentFlags().Lookup("db-user")) runCmd.PersistentFlags().StringP("db-database", "d", "events", "database DB") viper.BindPFlag("database.database", runCmd.PersistentFlags().Lookup("db-database")) runCmd.PersistentFlags().StringP("db-password", "p", "sensor", "database password") viper.BindPFlag("database.password", runCmd.PersistentFlags().Lookup("db-password")) runCmd.PersistentFlags().BoolP("db-mongo", "m", false, "use MongoDB") viper.BindPFlag("database.mongo", runCmd.PersistentFlags().Lookup("db-mongo")) runCmd.PersistentFlags().DurationP("db-rotate", "", 1*time.Hour, "time interval for database table rotations") viper.BindPFlag("database.rotate", runCmd.PersistentFlags().Lookup("db-rotate")) runCmd.PersistentFlags().Uint64P("db-maxtablesize", "", 500, "Maximum allowed cumulative table size in GB") viper.BindPFlag("database.maxtablesize", runCmd.PersistentFlags().Lookup("db-maxtablesize")) // Flow report options runCmd.PersistentFlags().BoolP("flowreport-nocompress", "", false, "send uncompressed flow reports (default is gzip)") viper.BindPFlag("flowreport.nocompress", runCmd.PersistentFlags().Lookup("flowreport-nocompress")) runCmd.PersistentFlags().StringP("flowreport-submission-url", "", "amqp://guest:guest@localhost:5672/", "URL to which flow reports will be 
submitted") viper.BindPFlag("flowreport.submission-url", runCmd.PersistentFlags().Lookup("flowreport-submission-url")) runCmd.PersistentFlags().StringP("flowreport-submission-exchange", "", "aggregations", "Exchange to which flow reports will be submitted") viper.BindPFlag("flowreport.submission-exchange", runCmd.PersistentFlags().Lookup("flowreport-submission-exchange")) runCmd.PersistentFlags().DurationP("flowreport-interval", "n", 0, "time interval for report submissions") viper.BindPFlag("flowreport.interval", runCmd.PersistentFlags().Lookup("flowreport-interval")) // Metrics submission options runCmd.PersistentFlags().BoolP("metrics-enable", "", false, "submit performance metrics to central sink") viper.BindPFlag("metrics.enable", runCmd.PersistentFlags().Lookup("metrics-enable")) runCmd.PersistentFlags().StringP("metrics-submission-url", "", "amqp://guest:guest@localhost:5672/", "URL to which metrics will be submitted") viper.BindPFlag("metrics.submission-url", runCmd.PersistentFlags().Lookup("metrics-submission-url")) runCmd.PersistentFlags().StringP("metrics-submission-exchange", "", "metrics", "Exchange to which metrics will be submitted") viper.BindPFlag("metrics.submission-exchange", runCmd.PersistentFlags().Lookup("metrics-submission-exchange")) // Passive DNS options runCmd.PersistentFlags().BoolP("pdns-enable", "", false, "collect and forward aggregated passive DNS data") viper.BindPFlag("pdns.enable", runCmd.PersistentFlags().Lookup("pdns-enable")) runCmd.PersistentFlags().StringP("pdns-submission-url", "", "amqp://guest:guest@localhost:5672/", "URL to which passive DNS events will be submitted") viper.BindPFlag("pdns.submission-url", runCmd.PersistentFlags().Lookup("pdns-submission-url")) runCmd.PersistentFlags().StringP("pdns-submission-exchange", "", "pdns", "Exchange to which passive DNS events will be submitted") viper.BindPFlag("pdns.submission-exchange", runCmd.PersistentFlags().Lookup("pdns-submission-exchange")) // Context collection options 
runCmd.PersistentFlags().BoolP("context-enable", "", false, "collect and forward flow context for alerted flows") viper.BindPFlag("context.enable", runCmd.PersistentFlags().Lookup("context-enable")) runCmd.PersistentFlags().StringP("context-submission-url", "", "amqp://guest:guest@localhost:5672/", "URL to which flow context will be submitted") viper.BindPFlag("context.submission-url", runCmd.PersistentFlags().Lookup("context-submission-url")) runCmd.PersistentFlags().StringP("context-submission-exchange", "", "context", "Exchange to which flow context events will be submitted") viper.BindPFlag("context.submission-exchange", runCmd.PersistentFlags().Lookup("context-submission-exchange")) runCmd.PersistentFlags().DurationP("context-cache-timeout", "", 60*time.Minute, "time for flow metadata to be kept for uncompleted flows") viper.BindPFlag("context.cache-timeout", runCmd.PersistentFlags().Lookup("context-cache-timeout")) // Bloom filter alerting options runCmd.PersistentFlags().StringP("bloom-file", "b", "", "Bloom filter for external indicator screening") viper.BindPFlag("bloom.file", runCmd.PersistentFlags().Lookup("bloom-file")) runCmd.PersistentFlags().BoolP("bloom-zipped", "z", false, "use gzipped Bloom filter file") viper.BindPFlag("bloom.zipped", runCmd.PersistentFlags().Lookup("bloom-zipped")) runCmd.PersistentFlags().StringP("bloom-alert-prefix", "", "BLF", "String prefix for Bloom filter alerts") viper.BindPFlag("bloom.alert-prefix", runCmd.PersistentFlags().Lookup("bloom-alert-prefix")) runCmd.PersistentFlags().StringSliceP("bloom-blacklist-iocs", "", []string{"/", "/index.htm", "/index.html"}, "Blacklisted strings in Bloom filter (will cause filter to be rejected)") viper.BindPFlag("bloom.blacklist-iocs", runCmd.PersistentFlags().Lookup("bloom-blacklist-iocs")) // IP blacklist alerting options runCmd.PersistentFlags().StringP("ip-blacklist", "", "", "List with IP ranges to alert on") viper.BindPFlag("ip.blacklist", 
runCmd.PersistentFlags().Lookup("ip-blacklist")) runCmd.PersistentFlags().StringP("ip-alert-prefix", "", "IP-BLACKLIST", "String prefix for IP blacklist alerts") viper.BindPFlag("ip.alert-prefix", runCmd.PersistentFlags().Lookup("ip-alert-prefix")) // Flow extraction options runCmd.PersistentFlags().BoolP("flowextract-enable", "", false, "extract and forward flow metadata") viper.BindPFlag("flowextract.enable", runCmd.PersistentFlags().Lookup("flowextract-enable")) runCmd.PersistentFlags().StringP("flowextract-bloom-selector", "", "", "IP address Bloom filter to select flows to extract") viper.BindPFlag("flowextract.bloom-selector", runCmd.PersistentFlags().Lookup("flowextract-bloom-selector")) runCmd.PersistentFlags().StringP("flowextract-submission-url", "", "amqp://guest:guest@localhost:5672/", "URL to which raw flow events will be submitted") viper.BindPFlag("flowextract.submission-url", runCmd.PersistentFlags().Lookup("flowextract-submission-url")) runCmd.PersistentFlags().StringP("flowextract-submission-exchange", "", "flows", "Exchange to which raw flow events will be submitted") viper.BindPFlag("flowextract.submission-exchange", runCmd.PersistentFlags().Lookup("flowextract-submission-exchange")) // Active enrichment options runCmd.PersistentFlags().BoolP("active-rdns", "", false, "enable active rDNS enrichment for src/dst IPs") viper.BindPFlag("active.rdns", runCmd.PersistentFlags().Lookup("active-rdns")) runCmd.PersistentFlags().DurationP("active-rdns-cache-expiry", "", 2*time.Minute, "cache expiry interval for rDNS lookups") viper.BindPFlag("active.rdns-cache-expiry", runCmd.PersistentFlags().Lookup("active-rdns-cache-expiry")) runCmd.PersistentFlags().BoolP("active-rdns-private-only", "", false, "only do active rDNS enrichment for RFC1918 IPs") viper.BindPFlag("active.rdns-private-only", runCmd.PersistentFlags().Lookup("active-rdns-private-only")) // Logging options runCmd.PersistentFlags().StringP("logfile", "", "", "Path to log file") 
viper.BindPFlag("logging.file", runCmd.PersistentFlags().Lookup("logfile")) runCmd.PersistentFlags().BoolP("logjson", "", false, "Output logs in JSON format") viper.BindPFlag("logging.json", runCmd.PersistentFlags().Lookup("logjson")) } fever-1.0.8/cmd/fever/main.go000066400000000000000000000002261353566242200157640ustar00rootroot00000000000000package main // DCSO FEVER // Copyright (c) 2017, 2018, DCSO GmbH import cmd "github.com/DCSO/fever/cmd/fever/cmds" func main() { cmd.Execute() } fever-1.0.8/db/000077500000000000000000000000001353566242200132245ustar00rootroot00000000000000fever-1.0.8/db/slurper.go000066400000000000000000000006571353566242200152570ustar00rootroot00000000000000package db // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "github.com/DCSO/fever/types" ) // Slurper is an interface for a worker that can be started (Run()) with a given // channel delivering Entries, storing them in an associated data store. // Finish() can be used to finalize any state. // TODO implement proper start/stop (atm 'hard' stop by exit()ing) type Slurper interface { Run(chan types.Entry) Finish() } fever-1.0.8/db/slurper_dummy.go000066400000000000000000000006711353566242200164660ustar00rootroot00000000000000package db // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "github.com/DCSO/fever/types" ) // DummySlurper is a slurper that just consumes entries with no action. type DummySlurper struct{} // Run starts a DummySlurper. func (s *DummySlurper) Run(eventchan chan types.Entry) { go func() { for range eventchan { } }() } // Finish is a null operation in the DummySlurper implementation. func (s *DummySlurper) Finish() { } fever-1.0.8/db/slurper_ejdb.go000066400000000000000000000017041353566242200162350ustar00rootroot00000000000000// +build ignore package db import ( "github.com/mkilling/goejdb" log "github.com/sirupsen/logrus" ) // EJDBSlurper is a Slurper that stores events in an EJDB database. 
type EJDBSlurper struct { db *goejdb.Ejdb } // Run starts an EJDBSlurper. func (s *EJDBSlurper) Run(eventchan chan Entry) { var err error i := 0 s.db, err = goejdb.Open("eventsdb", goejdb.JBOWRITER|goejdb.JBOCREAT) if err != nil { log.Warn(err) } coll, _ := s.db.CreateColl("events", nil) coll.SetIndex("timestamp", goejdb.JBIDXSTR) coll.SetIndex("event_type", goejdb.JBIDXSTR) coll.SetIndex("dns.rrname", goejdb.JBIDXSTR) coll.SetIndex("alert.payload_printable", goejdb.JBIDXSTR) go func() { coll.BeginTransaction() for d := range eventchan { if i%5000 == 0 { coll.CommitTransaction() coll.BeginTransaction() } coll.SaveJson(d.JSONLine) i++ } }() } // Finish closes the associated EJDB database.. func (s *EJDBSlurper) Finish() { s.db.Close() } fever-1.0.8/db/slurper_mongodb.go000066400000000000000000000153711353566242200167630ustar00rootroot00000000000000package db import ( "encoding/json" "fmt" "github.com/DCSO/fever/types" log "github.com/sirupsen/logrus" "gopkg.in/mgo.v2" ) // TYPES are event types/collections supported by us var TYPES = []string{ "alert", "dns", "fileinfo", "flow", "http", "smtp", "ssh", "stats", "tls", "misc", } // MAXCOLLSIZEFRACTIONS are the proportions of the general space cap to be // assigned to the collections for each event type -- used to determine // limits for capped collections var MAXCOLLSIZEFRACTIONS = map[string]float64{ "dns": 0.25, "http": 0.2, "flow": 0.25, "smtp": 0.05, "ssh": 0.05, "alert": 0.05, "tls": 0.05, "stats": 0.02, "misc": 0.03, "fileinfo": 0.05, } // INDEXES assigns index parameters to each collection, denoted by the // corresponding event type var INDEXES = map[string]([]mgo.Index){ "dns": []mgo.Index{ //mgo.Index{ // Key: []string{"src_ip", // "dest_ip"}, // Background: true, //}, mgo.Index{ Key: []string{"dns.rrname"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "fileinfo": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: 
[]string{"fileinfo.filename", "fileinfo.md5"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "flow": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "http": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: []string{"http.hostname", "http.http_user_agent"}, Background: true, }, mgo.Index{ Key: []string{"$text:http.url"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "alert": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: []string{"$text:alert.payload_printable"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "smtp": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: []string{"smtp.helo", "smtp.mail_from", "smtp.rcpt_to"}, Background: true, }, mgo.Index{ Key: []string{"email.attachment"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "tls": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: []string{"tls.subject", "tls.issuerdn", "tls.fingerprint"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, "misc": []mgo.Index{ mgo.Index{ Key: []string{"src_ip", "dest_ip"}, Background: true, }, mgo.Index{ Key: []string{"timestamp"}, Background: true, }, }, } // MongoSlurper is a Slurper that stores events in an MongoDB database. 
type MongoSlurper struct { User string Password string Host string Database string TypeDispatch map[string](chan types.Entry) ChunkSize int MaxSize int64 Logger *log.Entry } func (s *MongoSlurper) eventTypeWorker(eventchan chan types.Entry, eventType string) error { var err error cnt := 0 url := fmt.Sprintf("mongodb://%s:%s@%s/%s", s.User, s.Password, s.Host, s.Database) s.Logger.WithFields(log.Fields{"type": eventType}).Info("worker connecting") sess, err := mgo.Dial(url) if err != nil { s.Logger.Fatal(err) return err } s.Logger.WithFields(log.Fields{"type": eventType}).Info("connection established") db := sess.DB(s.Database) // create capped collection coll := db.C(eventType) sizeFrac := MAXCOLLSIZEFRACTIONS[eventType] if sizeFrac == 0 { s.Logger.Warn("Invalid type", eventType, "no max size available for collection") sizeFrac = 0.01 } s.Logger.WithFields(log.Fields{"type": eventType, "sizeFrac": sizeFrac}).Info("determining size fraction") sizeBytes := int(float64(s.MaxSize) * sizeFrac) s.Logger.WithFields(log.Fields{"type": eventType, "maxSize": sizeBytes}).Info("determining size cap") err = coll.Create(&mgo.CollectionInfo{ Capped: true, DisableIdIndex: true, MaxBytes: sizeBytes, }) if err != nil { s.Logger.WithFields(log.Fields{"type": eventType}).Info(err) } // check indexes on collection, create if needed idxList := INDEXES[eventType] if idxList != nil { s.Logger.WithFields(log.Fields{"type": eventType}).Info("checking indexes") for _, idx := range idxList { s.Logger.WithFields(log.Fields{"type": eventType, "idx": idx.Key}).Info("index check") coll.EnsureIndex(idx) } s.Logger.WithFields(log.Fields{"type": eventType}).Info("index check done") } b := coll.Bulk() b.Unordered() for event := range eventchan { var ev map[string]interface{} err := json.Unmarshal([]byte(event.JSONLine), &ev) if err != nil { s.Logger.Warn(err) } else { b.Insert(&ev) cnt++ if cnt%s.ChunkSize == 0 { s.Logger.WithFields(log.Fields{"type": eventType}).Debugf("flushing bulk") _, err = 
b.Run() if err != nil { s.Logger.Warn(err) } else { s.Logger.WithFields(log.Fields{"type": eventType}).Debugf("flushing complete") } b = coll.Bulk() b.Unordered() cnt = 0 } } } return nil } // MakeMongoSlurper creates a new MongoSlurper instance. func MakeMongoSlurper(host string, database string, user string, password string, chunkSize int, maxSize int64) *MongoSlurper { s := &MongoSlurper{ ChunkSize: chunkSize, Host: host, Database: database, User: user, Password: password, TypeDispatch: make(map[string](chan types.Entry)), MaxSize: maxSize * 1024 * 1024 * 1024, Logger: log.WithFields(log.Fields{"domain": "slurper", "slurper": "mongo"}), } for _, t := range TYPES { s.TypeDispatch[t] = make(chan types.Entry, 1000) } url := fmt.Sprintf("mongodb://%s:%s@%s/%s", s.User, s.Password, s.Host, s.Database) s.Logger.WithFields(log.Fields{"url": url}).Info("preparing for MongoDB connection") return s } // Run starts a MongoSlurper. func (s *MongoSlurper) Run(eventchan chan types.Entry) { // set up workers for each event type for k, v := range s.TypeDispatch { go s.eventTypeWorker(v, k) } // dispatch events to their corresponding worker go func() { for entry := range eventchan { targetchan := s.TypeDispatch[entry.EventType] if targetchan != nil { targetchan <- entry } else { s.TypeDispatch["misc"] <- entry } } }() } // Finish is a null operation in the MongoSlurper implementation. func (s *MongoSlurper) Finish() { } fever-1.0.8/db/slurper_postgres.go000066400000000000000000000125551353566242200172050ustar00rootroot00000000000000package db import ( "bytes" "fmt" "strings" "time" "github.com/DCSO/fever/types" log "github.com/sirupsen/logrus" pg "gopkg.in/pg.v5" ) var maxRetries = 20 // PostgresSlurper is a Slurper that stores events in an PostgreSQL database. 
type PostgresSlurper struct { DB *pg.DB LastRotatedTime time.Time IndexChan chan string CurrentTableName string RotationInterval time.Duration MaxTableSize int64 ChunkSize int Logger *log.Entry } // This is a fixed format for table names. func formatTableName(timestamp time.Time) string { return timestamp.Format("event-2006-01-02-1504") } // MakePostgresSlurper creates a new PostgresSlurper instance. func MakePostgresSlurper(host string, database string, user string, password string, rotationInterval time.Duration, maxTableSize int64, chunkSize int) *PostgresSlurper { var err error var i int var hasExt int db := pg.Connect(&pg.Options{ User: user, Password: password, Addr: host, Database: database, }) l := log.WithFields(log.Fields{ "domain": "slurper", "slurper": "postgres", }) l.WithFields(log.Fields{ "user": user, "host": host, "database": database, }).Info("connected to database") _, err = db.Query(pg.Scan(&hasExt), SQLCheckForTrigramExtension) for i = 0; err != nil && strings.Contains(err.Error(), "system is starting up"); i++ { if i > maxRetries { break } l.Warnf("problem checking for trigram extension: %s -- retrying %d/%d", err.Error(), i, maxRetries) _, err = db.Query(pg.Scan(&hasExt), SQLCheckForTrigramExtension) time.Sleep(10 * time.Second) } if err != nil { l.Fatalf("permanent error checking for trigram extension: %s", err.Error()) } if hasExt < 1 { l.Fatal("trigram extension ('pg_trgm') not loaded, please run "+ "'CREATE EXTENSION pg_trgm;'", err) } _, err = db.Exec(SQLTrigramFunction) if err != nil { l.Fatalf("error creating index preparation function: %s", err) } _, err = db.Exec(SQLQueryAllEvents) if err != nil { l.Fatalf("error creating global query function: %s", err) } s := &PostgresSlurper{ DB: db, RotationInterval: rotationInterval, MaxTableSize: maxTableSize * 1024 * 1024 * 1024, ChunkSize: chunkSize, Logger: l, } return s } type tableSize struct { Table string Size int64 } func (s *PostgresSlurper) expireOldTables() error { var tblSizes 
[]tableSize _, err := s.DB.Query(&tblSizes, SQLGetTableSizes) if err != nil { s.Logger.Warn("error determining table sizes", err) return err } totalSize := int64(0) for _, v := range tblSizes { totalSize += v.Size if totalSize > s.MaxTableSize && s.CurrentTableName != v.Table { s.Logger.WithFields(log.Fields{ "table": v.Table, "size": v.Size, }).Info("table expired") _, err = s.DB.Exec(fmt.Sprintf(`DROP TABLE "%s";`, v.Table)) if err != nil { s.Logger.WithFields(log.Fields{ "table": v.Table, "size": v.Size, "error": err.Error(), }).Warn("error dropping table") return err } } } return nil } func (s *PostgresSlurper) indexFunc() { for tblToIndex := range s.IndexChan { s.Logger.WithFields(log.Fields{ "table": tblToIndex, }).Info("creating indexes") idxSQL := fmt.Sprintf(SQLIndex, tblToIndex, tblToIndex, tblToIndex, tblToIndex, tblToIndex) _, idxErr := s.DB.Exec(idxSQL) if idxErr != nil { s.Logger.WithFields(log.Fields{ "table": tblToIndex, "error": idxErr.Error(), }).Info("error creating index") } s.Logger.Info("expiring old tables") s.expireOldTables() } } func (s *PostgresSlurper) slurpPostgres(eventchan chan types.Entry) { cnt := 0 var copybuf bytes.Buffer for { event := <-eventchan copybuf.WriteString(event.Timestamp) copybuf.WriteString("\t") copybuf.WriteString(event.JSONLine) copybuf.WriteString("\n") if cnt > 0 && cnt%s.ChunkSize == 0 { if s.LastRotatedTime.IsZero() || (time.Since(s.LastRotatedTime) > s.RotationInterval) { newTableName := formatTableName(time.Now()) if s.LastRotatedTime.IsZero() { s.Logger.WithFields(log.Fields{ "table": newTableName, }).Info("initializing table") } else { s.Logger.WithFields(log.Fields{ "from": s.CurrentTableName, "to": newTableName, }).Info("rotating tables") } crSQL := fmt.Sprintf(SQLCreate, newTableName, newTableName, s.DB.Options().User) _, crErr := s.DB.Exec(crSQL) if crErr != nil { s.Logger.WithFields(log.Fields{ "table": newTableName, "error": crErr.Error(), }).Warn("error creating table") } if 
!s.LastRotatedTime.IsZero() { s.IndexChan <- s.CurrentTableName } s.CurrentTableName = newTableName s.LastRotatedTime = time.Now() } cnt = 0 r := strings.NewReader(copybuf.String()) _, err := s.DB.CopyFrom(r, fmt.Sprintf(SQLCopy, s.CurrentTableName)) if err != nil { s.Logger.Warn(err) } else { s.Logger.WithFields(log.Fields{ "chunksize": s.ChunkSize, "table": s.CurrentTableName, }).Debug("COPY complete") } copybuf.Reset() } cnt++ } } // Run starts a PostgresSlurper. func (s *PostgresSlurper) Run(eventchan chan types.Entry) { // start indexer thread s.IndexChan = make(chan string, 1000) go s.indexFunc() // run slurper thread go s.slurpPostgres(eventchan) } // Finish is a null operation in the PostgresSlurper implementation. func (s *PostgresSlurper) Finish() { } fever-1.0.8/db/sql.go000066400000000000000000000122421353566242200143530ustar00rootroot00000000000000package db // DCSO FEVER // Copyright (c) 2017, DCSO GmbH // SQLTrigramFunction is a plpgsql function to pull out indexable content from event JSON const SQLTrigramFunction = `CREATE OR REPLACE FUNCTION trigram_string(payload jsonb) RETURNS text AS $$ DECLARE buffer varchar := ''; BEGIN -- trying in typical order of frequency IF payload->>'event_type' = 'dns' THEN RETURN payload->'dns'->>'rdata'; END IF; IF payload->>'event_type' = 'http' THEN RETURN (payload->'http'->>'hostname') || '|' || (payload->'http'->>'url') || '|' || (payload->'http'->>'http_user_agent'); END IF; IF payload->>'event_type' = 'tls' THEN RETURN (payload->'tls'->>'subject') ||'|' || (payload->'tls'->>'issuerdn') || '|' || (payload->'tls'->>'fingerprint'); END IF; IF payload->>'event_type' = 'alert' THEN RETURN (payload->'alert'->>'payload_printable') || '|' || (payload->'alert'->>'payload'); END IF; IF payload->>'event_type' = 'smtp' THEN RETURN (payload->'smtp'->>'helo') || '|' || (payload->'smtp'->>'mail_from') || '|' || (payload->'smtp'->>'rcpt_to') || '|' || (payload->'email'->>'from') || '|' || (payload->'email'->>'to') || '|' || 
(payload->'email'->>'attachment'); END IF; IF payload->>'event_type' = 'fileinfo' THEN RETURN (payload->'fileinfo'->>'filename') || '|' || (payload->'fileinfo'->>'md5'); END IF; RETURN buffer; END; $$ LANGUAGE plpgsql IMMUTABLE;` // SQLCheckForTrigramExtension is an SQL query to check whether the trigram extension is available. const SQLCheckForTrigramExtension = `SELECT COUNT(*) FROM pg_available_extensions WHERE name = 'pg_trgm';` // SQLCreate is an SQL/DDL clause to create a new event table const SQLCreate = `CREATE UNLOGGED TABLE IF NOT EXISTS "%s" (ts timestamp without time zone default now(), payload jsonb); GRANT ALL PRIVILEGES ON TABLE "%s" to %s;` // SQLCopy is an SQL/DDL clause to bulk insert a chunk of JSON into the database const SQLCopy = `COPY "%s" (ts, payload) FROM STDIN WITH CSV DELIMITER E'\t' QUOTE E'\b'` // SQLIndex is an SQL/DDL clause to create indexes on event tables const SQLIndex = `CREATE INDEX ON "%s" (((payload->>'src_ip')::INET), ((payload->>'src_port')::INT)); CREATE INDEX ON "%s" (ts); CREATE INDEX ON "%s" (((payload->>'dest_ip')::INET), ((payload->>'dest_port')::INT)); CREATE INDEX ON "%s" ((payload->>'event_type')); CREATE INDEX ON "%s" using GIN (trigram_string(payload) gin_trgm_ops)` // SQLGetTableSizes is an SQL query to obtain the names of tables in the current schema and their size in bytes. const SQLGetTableSizes = `SELECT relname as table, pg_total_relation_size(relid) as size FROM pg_catalog.pg_statio_user_tables ORDER BY 1 DESC;` // SQLGenericQuery is the main kind of query used to pull out event metadata. 
const SQLGenericQuery = `SELECT * FROM all_events_query($1::text, $2::timestamp, $3::timestamp, $4::text[], $5::inet, $6::int, $7::inet, $8::int, $9::int);` // SQLQueryAllEvents is a plpgsql function to enable queries over all hourly tables // Example: SELECT COUNT(*) FROM all_events_query('WHERE trigram_string(payload) LIKE ''%%foo%%'''); const SQLQueryAllEvents = `CREATE OR REPLACE FUNCTION all_events_query(keyword text, start_time timestamp with time zone, end_time timestamp with time zone, event_type text[], ipsrc inet, portsrc int, ipdest inet, portdest int, mlimit int) RETURNS TABLE (ts timestamp, payload jsonb) AS $$ DECLARE clause text; t RECORD; tables CURSOR FOR SELECT * FROM information_schema.tables WHERE table_name LIKE 'event%'; BEGIN clause := ''; OPEN tables; LOOP FETCH tables INTO t; EXIT WHEN NOT FOUND; IF clause != '' THEN clause := clause || ' UNION ALL '; END IF; clause := clause || 'SELECT * FROM ' || quote_ident(t.table_name) || ' WHERE ts BETWEEN ' || quote_literal(start_time) || ' AND ' || quote_literal(end_time); IF keyword IS NOT NULL THEN clause := clause || ' AND trigram_string(payload) LIKE ' || quote_literal(keyword); END IF; IF event_type IS NOT NULL THEN clause := clause || ' AND payload->>''event_type'' = ANY(' || quote_literal(event_type) || ')'; END IF; IF ipsrc IS NOT NULL THEN clause := clause || ' AND (payload->>''src_ip'')::inet <<= inet ' || quote_literal(ipsrc); END IF; IF portsrc IS NOT NULL THEN clause := clause || ' AND payload->>''src_port'' = ' || quote_literal(portsrc); END IF; IF ipdest IS NOT NULL THEN clause := clause || ' AND (payload->>''dest_ip'')::inet <<= inet ' || quote_literal(ipdest); END IF; IF portdest IS NOT NULL THEN clause := clause || ' AND payload->>''dest_port'' = ' || quote_literal(portdest); END IF; END LOOP; IF mlimit IS NOT NULL THEN clause := clause || ' LIMIT ' || quote_literal(mlimit); END IF; RAISE NOTICE '%', clause; CLOSE tables; RETURN QUERY EXECUTE clause; END; $$ LANGUAGE plpgsql 
STABLE; ` fever-1.0.8/doc/000077500000000000000000000000001353566242200134045ustar00rootroot00000000000000fever-1.0.8/doc/database.md000066400000000000000000000040251353566242200154730ustar00rootroot00000000000000## Database schema Events are stored in a JSONB column tagged with a timestamp. Indexes will be created on this timestamp, the source/destination IP/port values (composite), and the event type. Another full-text (trigram) index will be built for event type-specific plain-text fields that are concatenated using a `|`. The keyword-based full-text matches are intended to serve as the main means of access to 'interesting' events, and can be further refined by IP/port/type/... constraints, which are also indexed. All further queries on JSON fields **will be unindexed**, so care should be taken to reduce the search space as much as possible using indexed queries. A separate database must be used and the connecting user must be able to `CREATE` and `DROP` tables in the public schema. ```sql -- Initial table CREATE UNLOGGED TABLE IF NOT EXISTS "events-YY-MM-DD-HHMM" (ts timestamp without time zone default now(), payload jsonb); GRANT ALL PRIVILEGES ON TABLE "events-YY-MM-DD-HHMM" to sensor; -- Deferred CREATE INDEX ON "events-YY-MM-DD-HHMM" (ts); CREATE INDEX ON "events-YY-MM-DD-HHMM" (((payload->>'src_ip')::INET), ((payload->>'src_port')::INT)); CREATE INDEX ON "events-YY-MM-DD-HHMM" (((payload->>'dest_ip')::INET), ((payload->>'dest_port')::INT)); CREATE INDEX ON "events-YY-MM-DD-HHMM" ((payload->>'event_type')); CREATE INDEX ON "events-YY-MM-DD-HHMM" using GIN (trigram_string(payload) gin_trgm_ops) ``` `trigram_string(payload jsonb)` is a PL/PgSQL function that extracts and concatenates relevant data for indexing, see `sql.go`. 
The following contents are used to build the full-text index: - `dns` events: - `dns->rdata` - `http` events: - `http->hostname` + `http->url` + `http->http_user_agent` - `tls` events: - `tls->subject` + `tls->issuerdn` + `tls->fingerprint` - `alert` events: - `alert->payload_printable` + `alert->payload` - `smtp` events: - `smtp->helo` + `smtp->mail_from` + `smtp->rcpt_to` + `email->from`+ `email->to` + `email->attachment` - `fileinfo` events: - `fileinfo->filename` + `fileinfo->md5`fever-1.0.8/doc/flow-agg.md000066400000000000000000000026761353566242200154440ustar00rootroot00000000000000## Aggregated flow metadata JSON example ```json { "sensor-id": "foobar", "time-start": "2017-03-13T17:36:53.205850748+01:00", "time-end": "2017-03-13T17:36:58.205967348+01:00", "tuples": { "172.22.0.214_172.18.8.116_993": { "count": 1, "total_bytes_toclient": 86895, "total_bytes_toserver": 17880 }, "172.22.0.214_172.18.8.145_2222": { "count": 2, "total_bytes_toclient": 36326, "total_bytes_toserver": 4332 }, "172.22.0.214_198.232.125.113_80": { "count": 3, "total_bytes_toclient": 23242, "total_bytes_toserver": 1223 }, "172.22.0.214_198.232.125.123_80": { "count": 1, "total_bytes_toclient": 1026322, "total_bytes_toserver": 51232 } }, "proxy-map": { "23.37.43.27": { "ss.symcd.com": 1 } } } ``` The `tuples` keys represent routes in which sourceIP/destIP/destPort (concatenated using `_`) map to the number of flow events observed in the reported time period. In the `proxy-map` dict, the keys are destination IP addresses which have had observed HTTP requests on ports 8000-8999, 80 or 3128 (i.e. typical proxy ports). The associated values are the number of times that these requests were made with certain HTTP Host headers. Using the `-n` parameter, the reporting frequency can be tuned. Longer intervals (e.g. 
hours) will reduce load on the consuming endpoint, but may also lead to larger payloads in the JSON outlined above.fever-1.0.8/fever.service000066400000000000000000000005031353566242200153260ustar00rootroot00000000000000[Unit] Description=fast, extensible, versatile event router Documentation=https://github.com/DCSO/fever After=network.target [Service] SyslogIdentifier=fever EnvironmentFile=-/etc/default/fever ExecStart=/usr/bin/fever run $FEVER_ARGS ExecStop=/usr/bin/pkill fever Restart=on-failure [Install] WantedBy=multi-user.target fever-1.0.8/fever.yaml000066400000000000000000000064441353566242200146420ustar00rootroot00000000000000# Config file for FEVER # --------------------- # Output additional debug information. # verbose: true # Enable output of profiling information to specified file. # profile: profile.out # Use the given size for defining the size of data blocks to be handled at once. # chunksize: 50000 # Do not submit data to the sinks, only print on stdout. # dummy: true # Retry connection to sockets or servers for at most the given amount of times before # giving up. Use the value of 0 to never give up. # reconnect-retries: 5 # Specify time interval or number of items to cache before flushing to # database, whichever happens first. # flushtime: 1m # flushcount: 100000 # Configuration for PostgreSQL 9.5+ database connection. database: enable: false host: localhost user: user password: pass database: test # Set to true to use the MongoDB interface instead of PostgreSQL. mongo: false # Time interval after which a new table is created and background # indexing is started. rotate: 1h # Maximum size in gigabytes. maxtablesize: 50 # Configuration for input (from Suricata side). Only one of 'socket' # or 'redis' is supported at the same time, comment/uncomment to choose. # The 'nopipe' option disables Redis pipelining. For Redis, we assume the # 'suricata' list as a source. 
input: socket: /tmp/suri.sock #redis: # server: localhost # nopipe: true # Definition what event types to forward. Set 'all' to true to forward # everything received from Suricata, otherwise use the 'types' list to choose. # By default, we only forward alerts and stats events. forward: all: false types: - alert - stats # Configuration for output of forwarded events (socket to e.g. Logstash). output: socket: /tmp/suri-forward.sock # Configuration for flow report submission. flowreport: # Interval used for aggregation. interval: 60s submission-url: amqp://guest:guest@localhost:5672/ submission-exchange: aggregations # Set to true to disable gzip compression for uploads. nocompress: false # Configuration for metrics (i.e. InfluxDB) submission. metrics: enable: true submission-url: amqp://guest:guest@localhost:5672/ submission-exchange: metrics # Configuration for passive DNS submission. pdns: enable: true submission-url: amqp://guest:guest@localhost:5672/ submission-exchange: pdns # Configuration for alert-associated metadata submission. context: enable: false cache-timeout: 1h submission-url: amqp://guest:guest@localhost:5672/ submission-exchange: context # Configuration for detailed flow metadata submission. flowextract: enable: false submission-url: amqp://guest:guest@localhost:5672/ submission-exchange: aggregations # Uncomment to enable flow collection only for IPs in the given # Bloom filter. # bloom-selector: /tmp/flows.bloom # Configuration for Bloom filter alerting on HTTP, DNS and # TLS metadata events. #bloom: # file: ./in.bloom.gz # zipped: true # alert-prefix: BLF # blacklist-iocs: # - / # - /index.htm # - /index.html # Configuration for active information gathering. active: # Enable reverse DNS lookups for src/dst IPs. rdns: false rdns-private-only: true rdns-cache-expiry: 120s logging: # Insert file name here to redirect logs to separate file. file: # Set to true to enable JSON output. 
json: falsefever-1.0.8/input/000077500000000000000000000000001353566242200137765ustar00rootroot00000000000000fever-1.0.8/input/input.go000066400000000000000000000004011353566242200154570ustar00rootroot00000000000000package input // DCSO FEVER // Copyright (c) 2017, DCSO GmbH // Input is an interface describing the behaviour for a component to // handle events parsed from EVE input. type Input interface { GetName() string Run() SetVerbose(bool) Stop(chan bool) } fever-1.0.8/input/input_redis.go000066400000000000000000000167131353566242200166620ustar00rootroot00000000000000package input // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "io" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/garyburd/redigo/redis" log "github.com/sirupsen/logrus" ) var perfStatsSendInterval = 10 * time.Second var backOffTime = 500 * time.Millisecond // RedisInputPerfStats contains performance stats written to InfluxDB // for monitoring. type RedisInputPerfStats struct { RedisQueueLength uint64 `influx:"redis_queue_length"` } // RedisInput is an Input reading JSON EVE input from Redis list. 
type RedisInput struct { EventChan chan types.Entry Verbose bool Running bool Pool *redis.Pool StopChan chan bool StoppedChan chan bool Addr string Proto string Reconnecting bool ParseWorkers int BatchSize int PerfStats RedisInputPerfStats StatsEncoder *util.PerformanceStatsEncoder UsePipelining bool } // GetName returns a printable name for the input func (ri *RedisInput) GetName() string { return "Redis input" } func doParseJSON(inchan chan []byte, outchan chan types.Entry, wg *sync.WaitGroup) { defer wg.Done() log.Info("started parse worker") for v := range inchan { e, err := util.ParseJSON(v) if err != nil { log.Warn(err, v) continue } outchan <- e } } func (ri *RedisInput) popPipeline(wg *sync.WaitGroup, stopChan chan bool, parseChan chan []byte) { var err error defer wg.Done() var skipLogs = false for { select { case <-stopChan: return default: conn := ri.Pool.Get() err = conn.Send("MULTI") if err != nil { if !skipLogs { log.Warnf("MULTI error %s, backing off (%v) and disabling further warnings", err.Error(), backOffTime) skipLogs = true } conn.Close() time.Sleep(backOffTime) continue } else { if skipLogs { skipLogs = false log.Warnf("MULTI succeeded, showing warnings again") } } for i := 0; i < ri.BatchSize; i++ { err = conn.Send("RPOP", "suricata") if err != nil { if !skipLogs { log.Warnf("RPOP error %s, backing off (%v) and disabling further warnings", err.Error(), backOffTime) skipLogs = true } conn.Close() time.Sleep(backOffTime) break } else { if skipLogs { skipLogs = false log.Warnf("RPOP sending succeeded, showing warnings again") } } } r, err := redis.Values(conn.Do("EXEC")) if err != nil { if !skipLogs { log.Warnf("EXEC error %s, backing off (%v) and disabling further warnings", err.Error(), backOffTime) skipLogs = true } conn.Close() continue } else { if skipLogs { skipLogs = false log.Warnf("EXEC sending succeeded, showing warnings again") } } conn.Close() for i, v := range r { if v == nil { if i == 0 { log.Debugf("empty result received, backing 
off (%v)", backOffTime) time.Sleep(backOffTime) } conn.Close() break } else { parseChan <- v.([]byte) } } conn.Close() } } } func (ri *RedisInput) noPipePop(wg *sync.WaitGroup, stopChan chan bool, parseChan chan []byte) { conn := ri.Pool.Get() defer wg.Done() defer conn.Close() for { select { case <-stopChan: return default: vals, err := redis.Values(conn.Do("BRPOP", "suricata", "1")) if vals != nil && err == nil && len(vals) > 0 { parseChan <- vals[1].([]byte) } else { time.Sleep(backOffTime) if err.Error() != "redigo: nil returned" && err != io.EOF { log.Warn(err) conn = ri.Pool.Get() } } } } } func (ri *RedisInput) handleServerConnection() { var wg sync.WaitGroup var parsewg sync.WaitGroup parseChan := make(chan []byte) pipelineStopChan := make(chan bool) for i := 0; i < ri.ParseWorkers; i++ { parsewg.Add(1) go doParseJSON(parseChan, ri.EventChan, &parsewg) } if ri.UsePipelining { wg.Add(1) go ri.popPipeline(&wg, pipelineStopChan, parseChan) } else { log.Info("Not using Redis pipelining.") wg.Add(3) go ri.noPipePop(&wg, pipelineStopChan, parseChan) go ri.noPipePop(&wg, pipelineStopChan, parseChan) go ri.noPipePop(&wg, pipelineStopChan, parseChan) } wg.Add(1) go ri.sendPerfStats(&wg) <-ri.StopChan close(pipelineStopChan) wg.Wait() close(parseChan) parsewg.Wait() close(ri.StoppedChan) } func (ri *RedisInput) sendPerfStats(wg *sync.WaitGroup) { defer wg.Done() start := time.Now() for { conn := ri.Pool.Get() select { case <-ri.StopChan: conn.Close() return default: if time.Since(start) > perfStatsSendInterval { if ri.StatsEncoder != nil { r, err := conn.Do("LLEN", "suricata") if err != nil { if err == io.EOF { conn.Close() time.Sleep(perfStatsSendInterval) continue } else { log.Warnf("error retrieving Redis list length: %s", err.Error()) } } else { ri.PerfStats.RedisQueueLength, err = redis.Uint64(r, err) if err == nil { ri.StatsEncoder.Submit(ri.PerfStats) } } } start = time.Now() } time.Sleep(1 * time.Second) } conn.Close() } } // MakeRedisInput returns a new 
RedisInput, where the string parameter denotes a // hostname:port combination. func MakeRedisInput(addr string, outChan chan types.Entry, batchSize int) (*RedisInput, error) { var err error ri := &RedisInput{ EventChan: outChan, Verbose: false, StopChan: make(chan bool), Addr: addr, Proto: "tcp", ParseWorkers: 3, BatchSize: batchSize, Pool: &redis.Pool{ MaxIdle: 5, IdleTimeout: 240 * time.Second, Dial: func() (redis.Conn, error) { c, err := redis.Dial("tcp", addr) if err != nil { return nil, err } log.Infof("Dialing %s... result: %v", addr, err == nil) return c, err }, TestOnBorrow: func(c redis.Conn, t time.Time) error { _, err := c.Do("PING") return err }, }, } return ri, err } // MakeRedisInputSocket returns a new RedisInput, where string parameter // denotes a socket. func MakeRedisInputSocket(addr string, outChan chan types.Entry, batchSize int) (*RedisInput, error) { var err error ri := &RedisInput{ EventChan: outChan, Verbose: false, StopChan: make(chan bool), Addr: addr, Proto: "unix", ParseWorkers: 3, BatchSize: batchSize, Pool: &redis.Pool{ MaxIdle: 5, IdleTimeout: 240 * time.Second, Dial: func() (redis.Conn, error) { c, err := redis.Dial("unix", addr) if err != nil { return nil, err } log.Infof("Dialing %s... result: %v", addr, err == nil) return c, err }, TestOnBorrow: func(c redis.Conn, t time.Time) error { _, err := c.Do("PING") if err != nil { log.Println(err) } return err }, }, } return ri, err } // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. func (ri *RedisInput) SubmitStats(sc *util.PerformanceStatsEncoder) { ri.StatsEncoder = sc } // Run starts the RedisInput func (ri *RedisInput) Run() { if !ri.Running { ri.Running = true ri.StopChan = make(chan bool) go ri.handleServerConnection() } } // Stop causes the RedisInput to stop reading from the Redis list and close all // associated channels, including the passed notification channel. 
func (ri *RedisInput) Stop(stoppedChan chan bool) { if ri.Running { ri.StoppedChan = stoppedChan ri.StopChan <- true close(ri.StopChan) ri.Pool.Close() ri.Running = false } } // SetVerbose sets the input's verbosity level func (ri *RedisInput) SetVerbose(verbose bool) { ri.Verbose = verbose } fever-1.0.8/input/input_redis_test.go000066400000000000000000000124241353566242200177140ustar00rootroot00000000000000package input // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "encoding/json" "fmt" "io/ioutil" "math/rand" "os" "path/filepath" "sort" "sync" "testing" "time" "github.com/DCSO/fever/types" "github.com/garyburd/redigo/redis" log "github.com/sirupsen/logrus" "github.com/stvp/tempredis" ) const nofRedisTests = 10000 func makeEveEvent(etype string, number int) string { eve := types.EveEvent{ EventType: etype, FlowID: int64(number), SrcIP: fmt.Sprintf("10.0.0.%d", number), SrcPort: []int{11, 12, 13, 14, 15}[rand.Intn(5)], DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), DestPort: []int{11, 12, 13, 14, 15}[rand.Intn(5)], Proto: []string{"TCP", "UDP"}[rand.Intn(2)], } json, err := json.Marshal(eve) if err != nil { panic(err) } return string(json) } type byID []types.Entry func (a byID) Len() int { return len(a) } func (a byID) Swap(i, j int) { a[i], a[j] = a[j], a[i] } func (a byID) Less(i, j int) bool { var ie, je types.EveEvent err := json.Unmarshal([]byte(a[i].JSONLine), &ie) if err != nil { log.Fatal(err) } err = json.Unmarshal([]byte(a[j].JSONLine), &je) if err != nil { log.Fatal(err) } return ie.FlowID < je.FlowID } func _TestRedisInput(t *testing.T, usePipelining bool, sock string) { s, err := tempredis.Start(tempredis.Config{ "unixsocket": sock, }) if err != nil { t.Fatal(err) } defer s.Term() client, err := redis.Dial("unix", s.Socket()) if err != nil { t.Fatal(err) } defer client.Close() events := make([]string, nofRedisTests) var wg sync.WaitGroup wg.Add(1) go func(myWg *sync.WaitGroup) { defer myWg.Done() for i := 0; i < nofRedisTests; i++ { 
events[i] = makeEveEvent([]string{"http", "dns", "foo"}[rand.Intn(3)], i) client.Do("LPUSH", "suricata", events[i]) } }(&wg) wg.Wait() evChan := make(chan types.Entry) coll := make([]types.Entry, 0) wg.Add(1) go func(myWg *sync.WaitGroup) { defer myWg.Done() i := 0 for e := range evChan { coll = append(coll, e) if i == nofRedisTests-1 { return } i++ } }(&wg) ri, err := MakeRedisInputSocket(s.Socket(), evChan, 500) ri.UsePipelining = usePipelining if err != nil { t.Fatal(err) } ri.Run() wg.Wait() stopChan := make(chan bool) ri.Stop(stopChan) <-stopChan close(evChan) sort.Sort(byID(coll)) if len(coll) != nofRedisTests { t.Fatalf("unexpected number of items read from Redis queue: %d != %d", len(coll), nofRedisTests) } for i := 0; i < nofRedisTests; i++ { var checkEvent types.EveEvent err := json.Unmarshal([]byte(events[i]), &checkEvent) if err != nil { t.Fatal(err) } if coll[i].EventType != checkEvent.EventType { t.Fatalf("wrong event type for test event %d: %s != %s", i, coll[i].EventType, checkEvent.EventType) } } } func TestRedisInputWithPipelining(t *testing.T) { dir, err := ioutil.TempDir("", "test") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, "withPipe.sock") _TestRedisInput(t, true, tmpfn) } func TestRedisInputNoPipelining(t *testing.T) { dir, err := ioutil.TempDir("", "test") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, "withPipe.sock") _TestRedisInput(t, false, tmpfn) } func _TestRedisGone(t *testing.T, usePipelining bool, sock string) { s, err := tempredis.Start(tempredis.Config{ "unixsocket": sock, }) if err != nil { t.Fatal(err) } evChan := make(chan types.Entry) ri, err := MakeRedisInputSocket(s.Socket(), evChan, 500) ri.UsePipelining = usePipelining if err != nil { t.Fatal(err) } ri.Run() time.Sleep(2 * time.Second) s.Term() s, err = tempredis.Start(tempredis.Config{ "unixsocket": sock, }) if err != nil { t.Fatal(err) } client, err := redis.Dial("unix", s.Socket()) if err != 
nil { t.Fatal(err) } defer client.Close() events := make([]string, nofRedisTests) var wg sync.WaitGroup go func() { for i := 0; i < nofRedisTests; i++ { events[i] = makeEveEvent([]string{"http", "dns", "foo"}[rand.Intn(3)], i) client.Do("LPUSH", "suricata", events[i]) } }() coll := make([]types.Entry, 0) wg.Add(1) go func(myWg *sync.WaitGroup) { defer myWg.Done() i := 0 for e := range evChan { coll = append(coll, e) if i == nofRedisTests-1 { return } i++ } }(&wg) wg.Wait() stopChan := make(chan bool) ri.Stop(stopChan) <-stopChan close(evChan) sort.Sort(byID(coll)) if len(coll) != nofRedisTests { t.Fatalf("unexpected number of items read from Redis queue: %d != %d", len(coll), nofRedisTests) } for i := 0; i < nofRedisTests; i++ { var checkEvent types.EveEvent err := json.Unmarshal([]byte(events[i]), &checkEvent) if err != nil { t.Fatal(err) } if coll[i].EventType != checkEvent.EventType { t.Fatalf("wrong event type for test event %d: %s != %s", i, coll[i].EventType, checkEvent.EventType) } } } func TestRedisGoneWithPipelining(t *testing.T) { dir, err := ioutil.TempDir("", "test") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, "withPipe.sock") _TestRedisGone(t, true, tmpfn) } func TestRedisGoneNoPipelining(t *testing.T) { dir, err := ioutil.TempDir("", "test") if err != nil { log.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, "withPipe.sock") _TestRedisGone(t, false, tmpfn) } fever-1.0.8/input/input_socket.go000066400000000000000000000057461353566242200170500ustar00rootroot00000000000000package input // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bufio" "net" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // SocketInput is an Input reading JSON EVE input from a Unix socket. 
type SocketInput struct { EventChan chan types.Entry Verbose bool Running bool InputListener net.Listener StopChan chan bool StoppedChan chan bool } // GetName returns a printable name for the input func (si *SocketInput) GetName() string { return "Socket input" } func (si *SocketInput) handleServerConnection() { for { select { case <-si.StopChan: close(si.StoppedChan) return default: var start time.Time var totalLen int si.InputListener.(*net.UnixListener).SetDeadline(time.Now().Add(1e9)) c, err := si.InputListener.Accept() if nil != err { if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { continue } log.Info(err) } if si.Verbose { start = time.Now() } scanner := bufio.NewScanner(c) buf := make([]byte, 0, 32*1024*1024) scanner.Buffer(buf, 32*1024*1024) for { for scanner.Scan() { select { case <-si.StopChan: close(si.StoppedChan) return default: json := scanner.Bytes() totalLen += len(json) e, err := util.ParseJSON(json) if err != nil { log.Warn(err, string(json[:])) continue } si.EventChan <- e } } errRead := scanner.Err() if errRead == nil { break } else if errRead == bufio.ErrTooLong { log.Warn(errRead) scanner = bufio.NewScanner(c) scanner.Buffer(buf, 2*cap(buf)) } else { log.Warn(errRead) } } if si.Verbose { elapsed := time.Since(start) log.WithFields(log.Fields{ "size": totalLen, "elapsedTime": elapsed, }).Info("connection handled") } } } } // MakeSocketInput returns a new SocketInput reading from the Unix socket // inputSocket and writing parsed events to outChan. If no such socket could be // created for listening, the error returned is set accordingly. 
func MakeSocketInput(inputSocket string, outChan chan types.Entry) (*SocketInput, error) { var err error si := &SocketInput{ EventChan: outChan, Verbose: false, StopChan: make(chan bool), } si.InputListener, err = net.Listen("unix", inputSocket) if err != nil { return nil, err } return si, err } // Run starts the SocketInput func (si *SocketInput) Run() { if !si.Running { si.Running = true si.StopChan = make(chan bool) go si.handleServerConnection() } } // Stop causes the SocketInput to stop reading from the socket and close all // associated channels, including the passed notification channel. func (si *SocketInput) Stop(stoppedChan chan bool) { if si.Running { si.StoppedChan = stoppedChan close(si.StopChan) si.Running = false } } // SetVerbose sets the input's verbosity level func (si *SocketInput) SetVerbose(verbose bool) { si.Verbose = verbose } fever-1.0.8/input/input_socket_test.go000066400000000000000000000030251353566242200200730ustar00rootroot00000000000000package input // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "encoding/json" "fmt" "io/ioutil" "math/rand" "net" "os" "path/filepath" "testing" "github.com/DCSO/fever/types" log "github.com/sirupsen/logrus" ) func TestSocketInput(t *testing.T) { dir, err := ioutil.TempDir("", "test") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) evChan := make(chan types.Entry) events := make([]string, 1000) is, err := MakeSocketInput(tmpfn, evChan) if err != nil { t.Fatal(err) } is.Run() submitDone := make(chan bool) collectDone := make(chan bool) go func() { c, err := net.Dial("unix", tmpfn) if err != nil { log.Println(err) } for i := 0; i < 1000; i++ { events[i] = makeEveEvent([]string{"http", "dns", "foo"}[rand.Intn(3)], i) c.Write([]byte(events[i])) c.Write([]byte("\n")) } c.Close() close(submitDone) }() coll := make([]types.Entry, 0) go func() { for i := 0; i < 1000; i++ { e := <-evChan coll = append(coll, e) } close(collectDone) }() 
<-submitDone <-collectDone ch := make(chan bool) is.Stop(ch) <-ch if len(coll) != 1000 { t.Fatalf("unexpected number of items read from socket: %d != 1000", len(coll)) } for i := 0; i < 1000; i++ { var checkEvent types.EveEvent json.Unmarshal([]byte(events[i]), &checkEvent) if coll[i].EventType != checkEvent.EventType { t.Fatalf("wrong event type for test event %d: %s != %s", i, coll[i].EventType, checkEvent.EventType) } } } fever-1.0.8/processing/000077500000000000000000000000001353566242200150135ustar00rootroot00000000000000fever-1.0.8/processing/bloom_handler.go000066400000000000000000000224001353566242200201450ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "encoding/json" "fmt" "io" "net/url" "strings" "sync" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/DCSO/bloom" log "github.com/sirupsen/logrus" ) var sigs = map[string]string{ "http-url": "%s Possibly bad HTTP URL: ", "http-host": "%s Possibly bad HTTP host: ", "tls-sni": "%s Possibly bad TLS SNI: ", "dns-req": "%s Possibly bad DNS lookup to ", "dns-resp": "%s Possibly bad DNS response for ", } // MakeAlertEntryForHit returns an alert Entry as raised by an external // indicator match, e.g. a Bloom filter hit. The resulting alert will retain // the triggering event's metadata (e.g. 'dns' or 'http' objects) as well as // its timestamp. 
func MakeAlertEntryForHit(e types.Entry, eType string, alertPrefix string, ioc string) types.Entry { var eve types.EveEvent var newEve types.EveEvent var err = json.Unmarshal([]byte(e.JSONLine), &eve) if err != nil { log.Warn(err, e.JSONLine) } else { var value string if eType == "http-url" { value = fmt.Sprintf("%s | %s | %s", e.HTTPMethod, e.HTTPHost, e.HTTPUrl) } else if eType == "http-host" { value = e.HTTPHost } else if strings.HasPrefix(eType, "dns") { value = e.DNSRRName } else if eType == "tls-sni" { value = e.TLSSni } var sig = "%s Possibly bad traffic: " if v, ok := sigs[eType]; ok { sig = v } newEve = types.EveEvent{ EventType: "alert", Alert: &types.AlertEvent{ Action: "allowed", Category: "Potentially Bad Traffic", Signature: fmt.Sprintf(sig, alertPrefix) + value, }, FlowID: eve.FlowID, Stream: eve.Stream, InIface: eve.InIface, SrcIP: eve.SrcIP, SrcPort: eve.SrcPort, DestIP: eve.DestIP, DestPort: eve.DestPort, Proto: eve.Proto, TxID: eve.TxID, Timestamp: eve.Timestamp, PacketInfo: eve.PacketInfo, HTTP: eve.HTTP, DNS: eve.DNS, TLS: eve.TLS, ExtraInfo: &types.ExtraInfo{ BloomIOC: ioc, }, } } newEntry := e json, err := json.Marshal(newEve) if err != nil { log.Warn(err) } else { newEntry.JSONLine = string(json) } newEntry.EventType = "alert" return newEntry } // BloomHandler is a Handler which is meant to check for the presence of // event type-specific keywords in a Bloom filter, raising new 'alert' type // events when matches are found. type BloomHandler struct { sync.Mutex Logger *log.Entry Name string EventType string IocBloom *bloom.BloomFilter BloomFilename string BloomFileIsCompressed bool DatabaseEventChan chan types.Entry ForwardHandler Handler DoForwardAlert bool AlertPrefix string BlacklistIOCs map[string]struct{} } // BloomNoFileErr is an error thrown when a file-based operation (e.g. // reloading) is attempted on a bloom filter object with no file information // attached. 
type BloomNoFileErr struct { s string } // Error returns the error message. func (e *BloomNoFileErr) Error() string { return e.s } // MakeBloomHandler returns a new BloomHandler, checking against the given // Bloom filter and sending alerts to databaseChan as well as forwarding them // to a given forwarding handler. func MakeBloomHandler(iocBloom *bloom.BloomFilter, databaseChan chan types.Entry, forwardHandler Handler, alertPrefix string) *BloomHandler { bh := &BloomHandler{ Logger: log.WithFields(log.Fields{ "domain": "bloom", }), IocBloom: iocBloom, DatabaseEventChan: databaseChan, ForwardHandler: forwardHandler, DoForwardAlert: (util.ForwardAllEvents || util.AllowType("alert")), AlertPrefix: alertPrefix, BlacklistIOCs: make(map[string]struct{}), } log.WithFields(log.Fields{ "N": iocBloom.N, "domain": "bloom", }).Info("Bloom filter loaded") return bh } // MakeBloomHandlerFromFile returns a new BloomHandler created from a new // Bloom filter specified by the given file name. func MakeBloomHandlerFromFile(bloomFilename string, compressed bool, databaseChan chan types.Entry, forwardHandler Handler, alertPrefix string, blacklistIOCs []string) (*BloomHandler, error) { log.WithFields(log.Fields{ "domain": "bloom", }).Infof("loading Bloom filter '%s'", bloomFilename) iocBloom, err := bloom.LoadFilter(bloomFilename, compressed) if err != nil { if err == io.EOF { log.Warnf("file is empty, using empty default one") myBloom := bloom.Initialize(100, 0.00000001) iocBloom = &myBloom } else if strings.Contains(err.Error(), "value of k (number of hash functions) is too high") { log.Warnf("malformed Bloom filter file, using empty default one") myBloom := bloom.Initialize(100, 0.00000001) iocBloom = &myBloom } else { return nil, err } } bh := MakeBloomHandler(iocBloom, databaseChan, forwardHandler, alertPrefix) for _, v := range blacklistIOCs { if bh.IocBloom.Check([]byte(v)) { bh.Logger.Warnf("filter contains blacklisted indicator '%s'", v) } bh.BlacklistIOCs[v] = struct{}{} } 
bh.BloomFilename = bloomFilename bh.BloomFileIsCompressed = compressed bh.Logger.Info("filter loaded successfully", bloomFilename) return bh, nil } // Reload triggers a reload of the contents of the file with the name. func (a *BloomHandler) Reload() error { if a.BloomFilename == "" { return &BloomNoFileErr{"BloomHandler was not created from a file, no reloading possible"} } iocBloom, err := bloom.LoadFilter(a.BloomFilename, a.BloomFileIsCompressed) if err != nil { if err == io.EOF { log.Warnf("file is empty, using empty default one") myBloom := bloom.Initialize(100, 0.00000001) iocBloom = &myBloom } else if strings.Contains(err.Error(), "value of k (number of hash functions) is too high") { log.Warnf("malformed Bloom filter file, using empty default one") myBloom := bloom.Initialize(100, 0.00000001) iocBloom = &myBloom } else { return err } } a.Lock() a.IocBloom = iocBloom for k := range a.BlacklistIOCs { if a.IocBloom.Check([]byte(k)) { a.Logger.Warnf("filter contains blacklisted indicator '%s'", k) } } a.Unlock() log.WithFields(log.Fields{ "N": iocBloom.N, }).Info("Bloom filter reloaded") return nil } // Consume processes an Entry, emitting alerts if there is a match func (a *BloomHandler) Consume(e *types.Entry) error { if e.EventType == "http" { var fullURL string a.Lock() // check HTTP host first: foo.bar.de if a.IocBloom.Check([]byte(e.HTTPHost)) { if _, present := a.BlacklistIOCs[e.HTTPHost]; !present { n := MakeAlertEntryForHit(*e, "http-host", a.AlertPrefix, e.HTTPHost) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } } // we sometimes see full 'URLs' in the corresponding EVE field when // observing requests via proxies. In this case there is no need to // canonicalize the URL, it is already qualified. 
if strings.Contains(e.HTTPUrl, "://") { fullURL = e.HTTPUrl } else { // in all other cases, we need to create a full URL from the components fullURL = "http://" + e.HTTPHost + e.HTTPUrl } // we now should have a full URL regardless of where it came from: // http://foo.bar.de:123/baz u, err := url.Parse(fullURL) if err != nil { log.Warnf("could not parse URL '%s': %s", fullURL, err.Error()) a.Unlock() return nil } hostPath := fmt.Sprintf("%s%s", u.Host, u.Path) // http://foo.bar.de:123/baz if a.IocBloom.Check([]byte(fullURL)) { if _, present := a.BlacklistIOCs[fullURL]; !present { n := MakeAlertEntryForHit(*e, "http-url", a.AlertPrefix, fullURL) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } } else // foo.bar.de:123/baz if a.IocBloom.Check([]byte(hostPath)) { if _, present := a.BlacklistIOCs[hostPath]; !present { n := MakeAlertEntryForHit(*e, "http-url", a.AlertPrefix, hostPath) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } } else // /baz if a.IocBloom.Check([]byte(u.Path)) { if _, present := a.BlacklistIOCs[u.Path]; !present { n := MakeAlertEntryForHit(*e, "http-url", a.AlertPrefix, u.Path) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } } a.Unlock() } else if e.EventType == "dns" { a.Lock() if a.IocBloom.Check([]byte(e.DNSRRName)) { if _, present := a.BlacklistIOCs[e.DNSRRName]; !present { var n types.Entry if e.DNSType == "query" { n = MakeAlertEntryForHit(*e, "dns-req", a.AlertPrefix, e.DNSRRName) } else if e.DNSType == "answer" { n = MakeAlertEntryForHit(*e, "dns-resp", a.AlertPrefix, e.DNSRRName) } else { log.Warnf("invalid DNS type: '%s'", e.DNSType) a.Unlock() return nil } a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } } a.Unlock() } else if e.EventType == "tls" { a.Lock() if a.IocBloom.Check([]byte(e.TLSSni)) { if _, present := a.BlacklistIOCs[e.TLSSni]; !present { n := MakeAlertEntryForHit(*e, "tls-sni", a.AlertPrefix, e.TLSSni) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } } a.Unlock() } return nil } // 
GetName returns the name of the handler func (a *BloomHandler) GetName() string { return "Bloom filter handler" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *BloomHandler) GetEventTypes() []string { return []string{"http", "dns", "tls"} } fever-1.0.8/processing/bloom_handler_test.go000066400000000000000000000567031353566242200212210ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2018, 2019, DCSO GmbH import ( "encoding/json" "fmt" "io/ioutil" "math/rand" "os" "regexp" "sync" "testing" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/DCSO/bloom" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" ) var ( reHTTPURL = regexp.MustCompile(`Possibly bad HTTP URL: [^ ]+ . ([^ ]+) . ([^" ]+)`) reHTTPHost = regexp.MustCompile(`Possibly bad HTTP host: ([^" ]+)`) reDNSReq = regexp.MustCompile("Possibly bad DNS lookup to ([^\" ]+)") reDNSRep = regexp.MustCompile("Possibly bad DNS response for ([^\" ]+)") reSNI = regexp.MustCompile("Possibly bad TLS SNI: ([^\" ]+)") ) func makeBloomDNSEvent(rrname string) types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(5)+1), SrcPort: 53, DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "dns", Proto: "TCP", DNSRCode: []string{"NOERROR", "NXDOMAIN"}[rand.Intn(2)], DNSRData: fmt.Sprintf("10.%d.0.%d", rand.Intn(50), rand.Intn(50)+100), DNSRRName: rrname, DNSRRType: "A", DNSType: []string{"answer", "query"}[rand.Intn(2)], } eve := types.EveEvent{ EventType: e.EventType, SrcIP: e.SrcIP, SrcPort: int(e.SrcPort), DestIP: e.DestIP, DestPort: int(e.DestPort), Proto: e.Proto, DNS: &types.DNSEvent{ Rcode: e.DNSRCode, Rrname: e.DNSRRName, Rdata: e.DNSRData, Rrtype: e.DNSRRType, Type: e.DNSType, }, } json, err := json.Marshal(eve) if err != nil { log.Warn(err) } 
else { e.JSONLine = string(json) } return e } func makeBloomHTTPEvent(host string, url string) types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(5)+1), SrcPort: int64(rand.Intn(60000) + 1025), DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: host, HTTPUrl: url, HTTPMethod: "GET", } eve := types.EveEvent{ EventType: e.EventType, SrcIP: e.SrcIP, SrcPort: int(e.SrcPort), DestIP: e.DestIP, DestPort: int(e.DestPort), Proto: e.Proto, HTTP: &types.HTTPEvent{ Hostname: e.HTTPHost, URL: e.HTTPUrl, }, } json, err := json.Marshal(eve) if err != nil { log.Warn(err) } else { e.JSONLine = string(json) } return e } func makeBloomTLSEvent(host string) types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(5)+1), SrcPort: int64(rand.Intn(60000) + 1025), DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), DestPort: 443, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "tls", Proto: "TCP", TLSSni: host, } eve := types.EveEvent{ EventType: e.EventType, SrcIP: e.SrcIP, SrcPort: int(e.SrcPort), DestIP: e.DestIP, DestPort: int(e.DestPort), Proto: e.Proto, TLS: &types.TLSEvent{ Sni: e.TLSSni, }, } json, err := json.Marshal(eve) if err != nil { log.Warn(err) } else { e.JSONLine = string(json) } return e } var testURLs []string var testHosts []string var testTLSHosts []string const numOfTestBloomItems = 1000 // fill Bloom filter with disjunct set of values func fillBloom(b *bloom.BloomFilter) { testURLs = make([]string, 0) testHosts = make([]string, 0) testTLSHosts = make([]string, 0) i := 0 for i < numOfTestBloomItems { val := fmt.Sprintf("%s.com", util.RandStringBytesMaskImprSrc(6)) for b.Check([]byte(val)) { val = fmt.Sprintf("%s.com", util.RandStringBytesMaskImprSrc(6)) } i++ testHosts = append(testHosts, val) b.Add([]byte(val)) } i = 0 for i < numOfTestBloomItems { val := fmt.Sprintf("%s.com", 
util.RandStringBytesMaskImprSrc(6)) for b.Check([]byte(val)) { val = fmt.Sprintf("%s.com", util.RandStringBytesMaskImprSrc(6)) } i++ testTLSHosts = append(testTLSHosts, val) b.Add([]byte(val)) } i = 0 for i < numOfTestBloomItems { val := fmt.Sprintf("http://foo.com/%s.html", util.RandStringBytesMaskImprSrc(6)) for b.Check([]byte(val)) { val = fmt.Sprintf("http://foo.com/%s.html", util.RandStringBytesMaskImprSrc(6)) } i++ testURLs = append(testURLs, val) b.Add([]byte(val)) } } // CollectorHandler simply gathers consumed events in a list type CollectorHandler struct { EntriesLock sync.Mutex Entries map[string]bool } func (h *CollectorHandler) GetName() string { return "Collector handler" } func (h *CollectorHandler) GetEventTypes() []string { return []string{"alert"} } func (h *CollectorHandler) Consume(e *types.Entry) error { h.EntriesLock.Lock() defer h.EntriesLock.Unlock() match := reHTTPURL.FindStringSubmatch(e.JSONLine) if match != nil { url := match[2] h.Entries[url] = true return nil } match = reHTTPHost.FindStringSubmatch(e.JSONLine) if match != nil { host := match[1] h.Entries[host] = true return nil } match = reDNSReq.FindStringSubmatch(e.JSONLine) if match != nil { var eve types.EveEvent var err = json.Unmarshal([]byte(e.JSONLine), &eve) if err != nil { log.Fatal(err) } if eve.DNS.Type != "query" { log.Fatalf("request alert for type (%s) != query", eve.DNS.Type) } h.Entries[match[1]] = true return nil } match = reDNSRep.FindStringSubmatch(e.JSONLine) if match != nil { var eve types.EveEvent var err = json.Unmarshal([]byte(e.JSONLine), &eve) if err != nil { log.Fatal(err) } if eve.DNS.Type != "answer" { log.Fatalf("request alert for type (%s) != answer", eve.DNS.Type) } h.Entries[match[1]] = true return nil } match = reSNI.FindStringSubmatch(e.JSONLine) if match != nil { h.Entries[match[1]] = true return nil } return nil } func (h *CollectorHandler) Reset() { h.EntriesLock.Lock() defer h.EntriesLock.Unlock() h.Entries = make(map[string]bool) } func (h 
*CollectorHandler) GetEntries() map[string]bool { h.EntriesLock.Lock() defer h.EntriesLock.Unlock() return h.Entries } func TestBloomHandler(t *testing.T) { // make sure that alerts are forwarded util.PrepareEventFilter([]string{"alert"}, false) // initalize Bloom filter and fill with 'interesting' values bf := bloom.Initialize(100000, 0.0000001) fillBloom(&bf) // channel to receive events to be saved to database dbChan := make(chan types.Entry) // handler to receive forwarded events fwhandler := &CollectorHandler{ Entries: make(map[string]bool), } // concurrently gather entries to be written to DB dbWritten := make([]types.Entry, 0) consumeWaitChan := make(chan bool) go func() { for e := range dbChan { dbWritten = append(dbWritten, e) } close(consumeWaitChan) }() bh := MakeBloomHandler(&bf, dbChan, fwhandler, "FOO BAR") err := bh.Reload() if err == nil { t.Fatal("reloading without file should fail") } bhTypes := bh.GetEventTypes() if len(bhTypes) != 3 { t.Fatal("Bloom handler should claim three types") } if bhTypes[0] != "http" { t.Fatal("Bloom handler should claim 'http' type") } if bhTypes[1] != "dns" { t.Fatal("Bloom handler should claim 'dns' type") } if bhTypes[2] != "tls" { t.Fatal("Bloom handler should claim 'tls' type") } if bh.GetName() != "Bloom filter handler" { t.Fatal("Bloom handler has wrong name") } i := 0 j := 0 k := 0 for { var e types.Entry // emit Bloom filter TP event with 20% individual probability, at most // each if 2 < rand.Intn(10) { if i == numOfTestBloomItems && j == numOfTestBloomItems && k == numOfTestBloomItems { break } // uniformly distribute hits over HTTP URL/Host and DNS lookups switch rnd := rand.Intn(3); rnd { case 0: if i < numOfTestBloomItems { e = makeBloomDNSEvent(testHosts[i]) bh.Consume(&e) i++ } case 1: if j < numOfTestBloomItems { e = makeBloomHTTPEvent("foo.com", testURLs[j]) bh.Consume(&e) j++ } case 2: if k < numOfTestBloomItems { e = makeBloomTLSEvent(testTLSHosts[k]) bh.Consume(&e) k++ } } } else { // uniformly 
distribute non-matching hits over HTTP URL/Host and DNS lookups switch rnd := rand.Intn(3); rnd { case 0: s := fmt.Sprintf("%s.com", util.RandStringBytesMaskImprSrc(6)) for bf.Check([]byte(s)) { s = fmt.Sprintf("%s.%s", util.RandStringBytesMaskImprSrc(6), util.RandStringBytesMaskImprSrc(2)) } e = makeBloomDNSEvent(s) bh.Consume(&e) case 1: s := fmt.Sprintf("/%s.html", util.RandStringBytesMaskImprSrc(6)) for bf.Check([]byte(s)) { s = fmt.Sprintf("/%s.%s.html", util.RandStringBytesMaskImprSrc(6), util.RandStringBytesMaskImprSrc(6)) } e = makeBloomHTTPEvent("foo.com", s) bh.Consume(&e) case 2: s := fmt.Sprintf("%s.com", util.RandStringBytesMaskImprSrc(6)) for bf.Check([]byte(s)) { s = fmt.Sprintf("%s.%s", util.RandStringBytesMaskImprSrc(6), util.RandStringBytesMaskImprSrc(2)) } e = makeBloomTLSEvent(s) bh.Consume(&e) } } } // wait until all values have been collected close(dbChan) <-consumeWaitChan // check that we haven't missed anything if len(fwhandler.Entries) < 3*numOfTestBloomItems { t.Fatalf("expected %d forwarded BLF alerts, seen less (%d)", numOfTestBloomItems, len(fwhandler.Entries)) } // we want _at least_ to have the test values forwarded as alerts // (as FP are possible) for _, v := range testHosts { if _, ok := fwhandler.Entries[v]; !ok { t.Fatalf("testhost %s not forwarded", v) } } for _, v := range testURLs { if _, ok := fwhandler.Entries[v]; !ok { t.Fatalf("testurl %s not forwarded", v) } } } func TestBloomHandlerFromFile(t *testing.T) { b1 := bloom.Initialize(1000, 0.0001) b2 := bloom.Initialize(1000, 0.0001) b1.Add([]byte("foobar")) b2.Add([]byte("baz")) b1File, err := ioutil.TempFile("", "example") if err != nil { t.Fatal(err) } defer os.Remove(b1File.Name()) b1.Write(b1File) b1File.Close() // handler to receive forwarded events fwhandler := &CollectorHandler{ Entries: make(map[string]bool), } dbChan := make(chan types.Entry, 10) defer close(dbChan) bh, err := MakeBloomHandlerFromFile(b1File.Name(), false, dbChan, fwhandler, "FOO BAR", 
[]string{"/"}) if err != nil { t.Fatal(err) } e := makeBloomDNSEvent("foobar") bh.Consume(&e) if len(fwhandler.Entries) != 1 { t.Fatalf("Unexpected number of entries: %d != 1 ", len(fwhandler.Entries)) } if !fwhandler.Entries["foobar"] { t.Fatalf("expected entry is missing") } e = makeBloomDNSEvent("baz") bh.Consume(&e) if len(fwhandler.Entries) != 1 { t.Fatalf("Unexpected number of entries: %d != 1 ", len(fwhandler.Entries)) } if !fwhandler.Entries["foobar"] { t.Fatalf("expected entry is missing") } b2File, err := os.OpenFile(b1File.Name(), os.O_RDWR|os.O_CREATE, 0755) if err != nil { t.Fatal(err) } b2.Write(b2File) b2File.Close() bh.Reload() fwhandler.Entries = make(map[string]bool) e = makeBloomDNSEvent("baz") bh.Consume(&e) if len(fwhandler.Entries) != 1 { t.Fatalf("Unexpected number of entries: %d != 1 ", len(fwhandler.Entries)) } if !fwhandler.Entries["baz"] { t.Fatalf("expected entry is missing") } if fwhandler.Entries["foobar"] { t.Fatalf("unexpected entry") } e = makeBloomDNSEvent("foobar") bh.Consume(&e) if len(fwhandler.Entries) != 1 { t.Fatalf("Unexpected number of entries: %d != 1 ", len(fwhandler.Entries)) } if !fwhandler.Entries["baz"] { t.Fatalf("expected entry is missing") } if fwhandler.Entries["foobar"] { t.Fatalf("unexpected entry") } } func TestBloomHandlerEmptyInput(t *testing.T) { blFile, err := ioutil.TempFile("", "empty") if err != nil { t.Fatal(err) } defer os.Remove(blFile.Name()) blFile.Close() dbChan := make(chan types.Entry, 10) defer close(dbChan) bf, err := MakeBloomHandlerFromFile(blFile.Name(), false, dbChan, nil, "FOO BAR", []string{"/"}) if err != nil { t.Fatal(err) } if bf == nil { t.Fatal("bloom filter should not be nil for empty file") } } func TestBloomHandlerBlacklistedInputFromFile(t *testing.T) { b1 := bloom.Initialize(1000, 0.0001) b1.Add([]byte("/")) b1File, err := ioutil.TempFile("", "blist") if err != nil { t.Fatal(err) } defer os.Remove(b1File.Name()) b1.Write(b1File) b1File.Close() b2 := bloom.Initialize(1000, 
0.0001) b2.Add([]byte("/foobarbaz")) dbChan := make(chan types.Entry, 10) defer close(dbChan) hook := test.NewGlobal() _, err = MakeBloomHandlerFromFile(b1File.Name(), false, nil, nil, "FOO BAR", []string{"/"}) if err != nil { t.Fatal(err) } entries := hook.AllEntries() if len(entries) != 4 { t.Fatal("missing log entries") } if entries[2].Message != "filter contains blacklisted indicator '/'" { t.Fatal("wrong log entry for invalid IP range") } b2File, err := os.OpenFile(b1File.Name(), os.O_RDWR|os.O_CREATE, 0755) if err != nil { t.Fatal(err) } b2.Write(b2File) b2File.Close() bf, err := MakeBloomHandlerFromFile(b1File.Name(), false, nil, nil, "FOO BAR", []string{"/"}) if err != nil { t.Fatal(err) } b2File, err = os.OpenFile(b1File.Name(), os.O_RDWR|os.O_CREATE, 0755) if err != nil { t.Fatal(err) } b1.Write(b2File) b2File.Close() hook.Reset() err = bf.Reload() if err != nil { t.Fatal(err) } entries = hook.AllEntries() if len(entries) != 2 { t.Fatal("missing log entries") } if entries[0].Message != "filter contains blacklisted indicator '/'" { t.Fatal("wrong log entry for invalid IP range") } } func TestBloomHandlerURL(t *testing.T) { e1 := types.Entry{ SrcIP: "10.0.0.1", SrcPort: 23545, DestIP: "10.0.0.2", DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: "foo.bar.de", HTTPUrl: "http://foo.bar.de/oddlyspecific", HTTPMethod: "GET", } eve1 := types.EveEvent{ EventType: e1.EventType, SrcIP: e1.SrcIP, SrcPort: int(e1.SrcPort), DestIP: e1.DestIP, DestPort: int(e1.DestPort), Proto: e1.Proto, HTTP: &types.HTTPEvent{ Hostname: e1.HTTPHost, URL: e1.HTTPUrl, }, } json1, err := json.Marshal(eve1) if err != nil { log.Warn(err) } else { e1.JSONLine = string(json1) } e2 := types.Entry{ SrcIP: "10.0.0.1", SrcPort: 23545, DestIP: "10.0.0.2", DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: "foo.bar.de", HTTPUrl: "/oddlyspecific", HTTPMethod: "GET", 
} eve2 := types.EveEvent{ EventType: e2.EventType, SrcIP: e2.SrcIP, SrcPort: int(e2.SrcPort), DestIP: e2.DestIP, DestPort: int(e2.DestPort), Proto: e2.Proto, HTTP: &types.HTTPEvent{ Hostname: e2.HTTPHost, URL: e2.HTTPUrl, }, } json2, err := json.Marshal(eve2) if err != nil { log.Warn(err) } else { e2.JSONLine = string(json2) } e3 := types.Entry{ SrcIP: "10.0.0.1", SrcPort: 23545, DestIP: "10.0.0.2", DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: "foo.bar.com", HTTPUrl: "/oddlyspecific", HTTPMethod: "GET", } eve3 := types.EveEvent{ EventType: e3.EventType, SrcIP: e3.SrcIP, SrcPort: int(e3.SrcPort), DestIP: e3.DestIP, DestPort: int(e3.DestPort), Proto: e3.Proto, HTTP: &types.HTTPEvent{ Hostname: e3.HTTPHost, URL: e3.HTTPUrl, }, } json3, err := json.Marshal(eve3) if err != nil { log.Warn(err) } else { e3.JSONLine = string(json3) } dbChan := make(chan types.Entry) dbWritten := make([]types.Entry, 0) consumeWaitChan := make(chan bool) go func() { for e := range dbChan { dbWritten = append(dbWritten, e) } close(consumeWaitChan) }() util.PrepareEventFilter([]string{"alert"}, false) // initalize Bloom filter and fill with 'interesting' values bf := bloom.Initialize(100000, 0.0000001) bf.Add([]byte("/oddlyspecific")) // handler to receive forwarded events fwhandler := &CollectorHandler{ Entries: make(map[string]bool), } bh := MakeBloomHandler(&bf, dbChan, fwhandler, "FOO BAR") bh.Consume(&e1) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e1) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("http://foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e1) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", 
len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("https://foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e1) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("https://foo.bar.com/oddlyspecific")) fwhandler.Reset() bh.Consume(&e1) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("/")) fwhandler.Reset() bh.Consume(&e1) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("/oddlyspecific")) fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("http://foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("https://foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("https://foo.bar.com/oddlyspecific")) fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("/")) fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = 
bloom.Initialize(100000, 0.0000001) bf.Add([]byte("/oddlyspecific")) fwhandler.Reset() bh.Consume(&e3) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e3) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("http://foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e3) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("https://foo.bar.de/oddlyspecific")) fwhandler.Reset() bh.Consume(&e3) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("https://foo.bar.com/oddlyspecific")) fwhandler.Reset() bh.Consume(&e3) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } bf = bloom.Initialize(100000, 0.0000001) bf.Add([]byte("/")) fwhandler.Reset() bh.Consume(&e3) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("too many alerts: %d", len(fwhandler.GetEntries())) } } func TestBloomHandlerBlacklistedSkip(t *testing.T) { e1 := types.Entry{ SrcIP: "10.0.0.1", SrcPort: 23545, DestIP: "10.0.0.2", DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: "foo.bar.de", HTTPUrl: "http://foo.bar.de/oddlyspecific", HTTPMethod: "GET", } eve1 := types.EveEvent{ EventType: e1.EventType, SrcIP: e1.SrcIP, SrcPort: int(e1.SrcPort), DestIP: e1.DestIP, DestPort: int(e1.DestPort), Proto: e1.Proto, HTTP: &types.HTTPEvent{ Hostname: e1.HTTPHost, URL: e1.HTTPUrl, }, } json1, err := json.Marshal(eve1) if err != nil { log.Warn(err) } else { e1.JSONLine = string(json1) } e2 := types.Entry{ SrcIP: "10.0.0.1", 
SrcPort: 23545, DestIP: "10.0.0.2", DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: "foo.bar.de", HTTPUrl: "/", HTTPMethod: "GET", } eve2 := types.EveEvent{ EventType: e2.EventType, SrcIP: e2.SrcIP, SrcPort: int(e2.SrcPort), DestIP: e2.DestIP, DestPort: int(e2.DestPort), Proto: e2.Proto, HTTP: &types.HTTPEvent{ Hostname: e2.HTTPHost, URL: e2.HTTPUrl, }, } json2, err := json.Marshal(eve2) if err != nil { log.Warn(err) } else { e2.JSONLine = string(json2) } b1 := bloom.Initialize(1000, 0.0001) b1.Add([]byte("/oddlyspecific")) b1.Add([]byte("/")) b1File, err := ioutil.TempFile("", "blist") if err != nil { t.Fatal(err) } defer os.Remove(b1File.Name()) b1.Write(b1File) b1File.Close() dbChan := make(chan types.Entry, 5) dbWritten := make([]types.Entry, 0) consumeWaitChan := make(chan bool) go func() { for e := range dbChan { dbWritten = append(dbWritten, e) } close(consumeWaitChan) }() util.PrepareEventFilter([]string{"alert"}, false) // handler to receive forwarded events fwhandler := &CollectorHandler{ Entries: make(map[string]bool), } bh, err := MakeBloomHandlerFromFile(b1File.Name(), false, dbChan, fwhandler, "FOO BAR", []string{"/"}) if err != nil { t.Fatal(err) } bh.Consume(&e1) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } fwhandler.Reset() bh.Consume(&e2) if len(fwhandler.GetEntries()) != 0 { t.Fatalf("should not create alert but got %d", len(fwhandler.GetEntries())) } bh.Consume(&e1) if len(fwhandler.GetEntries()) != 1 { t.Fatalf("not enough alerts: %d", len(fwhandler.GetEntries())) } } func TestBloomHandlerInvalidDNS(t *testing.T) { // make sure that alerts are forwarded util.PrepareEventFilter([]string{"alert"}, false) // initalize Bloom filter and fill with 'interesting' values bf := bloom.Initialize(100000, 0.0000001) // channel to receive events to be saved to database dbChan := make(chan types.Entry) // handler to receive forwarded 
events fwhandler := &CollectorHandler{ Entries: make(map[string]bool), } // concurrently gather entries to be written to DB dbWritten := make([]types.Entry, 0) consumeWaitChan := make(chan bool) go func() { for e := range dbChan { dbWritten = append(dbWritten, e) } close(consumeWaitChan) }() bh := MakeBloomHandler(&bf, dbChan, fwhandler, "FOO BAR") e := makeBloomDNSEvent("foobar") e.DNSType = "foobar" bf.Add([]byte(e.DNSRRName)) hook := test.NewGlobal() bh.Consume(&e) entries := hook.AllEntries() if len(entries) < 1 { t.Fatal("missing log entries") } if entries[0].Message != "invalid DNS type: 'foobar'" { t.Fatal("wrong log entry for invalid DNS type") } } fever-1.0.8/processing/context_collector.go000066400000000000000000000123601353566242200210760ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/patrickmn/go-cache" log "github.com/sirupsen/logrus" ) // DebugOutputInterval specifies the amount of cache operations before // printing the current cache size, in verbose mode. const DebugOutputInterval = 100000 // GlobalContextCollector is a shared ContextCollector to be used by FEVER. var GlobalContextCollector *ContextCollector // ContextShipper is a function that processes a slice of Entries that make up a // context of an alert, e.g. all events that share a flow ID relevant for the // alert. type ContextShipper func(Context, *log.Entry) error // ContextCollectorPerfStats contains performance stats written to InfluxDB // for monitoring. type ContextCollectorPerfStats struct { Flows uint64 `influx:"context_flows"` Events uint64 `influx:"context_events"` JSONBytes uint64 `influx:"context_json_bytes"` } // ContextCollector is a component that maintains a cache of metadata per // flow ID, forwarding it to a specified sink if associated with an alert. 
type ContextCollector struct { PerfStats ContextCollectorPerfStats StatsEncoder *util.PerformanceStatsEncoder StopChan chan bool StoppedChan chan bool StopCounterChan chan bool StoppedCounterChan chan bool Running bool StatsLock sync.Mutex Cache *cache.Cache MarkLock sync.Mutex Marked map[string]struct{} Logger *log.Entry i uint64 Ship ContextShipper } // Context is a collection of JSON events that belong to a given flow. type Context []string // MakeContextCollector creates a new ContextCollector. func MakeContextCollector(shipper ContextShipper, defaultTTL time.Duration) *ContextCollector { c := &ContextCollector{ Logger: log.WithFields(log.Fields{ "domain": "context", }), Cache: cache.New(defaultTTL, defaultTTL), Marked: make(map[string]struct{}), i: 0, Ship: shipper, } c.Logger.Debugf("created cache with default TTL %v", defaultTTL) return c } // Mark queues metadata for a given flow for forwarding, identified by its // flow ID. func (c *ContextCollector) Mark(flowID string) { // when seeing an alert, just mark the flow ID as relevant c.MarkLock.Lock() c.Marked[flowID] = struct{}{} c.MarkLock.Unlock() } // Consume processes an Entry, adding the data within to the internal // aggregated state func (c *ContextCollector) Consume(e *types.Entry) error { var myC Context // Some events, e.g. 
stats, have no flow ID set if e.FlowID == "" { return nil } cval, exist := c.Cache.Get(e.FlowID) if exist { // the 'flow' event always comes last, so we can use it as an // indicator that the flow is complete and can be processed if e.EventType == types.EventTypeFlow { var isMarked bool c.MarkLock.Lock() if _, ok := c.Marked[e.FlowID]; ok { isMarked = true } c.MarkLock.Unlock() if isMarked { c.StatsLock.Lock() c.PerfStats.Flows++ c.PerfStats.Events += uint64(len(cval.(Context))) for _, v := range cval.(Context) { c.PerfStats.JSONBytes += uint64(len(v)) } c.StatsLock.Unlock() c.Ship(cval.(Context), c.Logger) delete(c.Marked, e.FlowID) } c.Cache.Delete(e.FlowID) } else { myC = cval.(Context) myC = append(myC, e.JSONLine) c.Cache.Set(e.FlowID, myC, cache.DefaultExpiration) } } else { if e.EventType != types.EventTypeFlow { myC = append(myC, e.JSONLine) c.Cache.Set(e.FlowID, myC, cache.DefaultExpiration) } } c.i++ if c.i%DebugOutputInterval == 0 { count := c.Cache.ItemCount() c.Logger.WithFields(log.Fields{ "n": count, }).Debugf("cache size after another %d events", DebugOutputInterval) c.i = 0 } return nil } func (c *ContextCollector) runCounter() { sTime := time.Now() for { time.Sleep(500 * time.Millisecond) select { case <-c.StopCounterChan: close(c.StoppedCounterChan) return default: if c.StatsEncoder == nil || time.Since(sTime) < c.StatsEncoder.SubmitPeriod { continue } c.StatsEncoder.Submit(c.PerfStats) c.StatsLock.Lock() c.PerfStats.JSONBytes = 0 c.PerfStats.Flows = 0 c.PerfStats.Events = 0 sTime = time.Now() c.StatsLock.Unlock() } } } // GetName returns the name of the handler func (c *ContextCollector) GetName() string { return "Context collector" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (c *ContextCollector) GetEventTypes() []string { return []string{"*"} } // Run starts the metrics collection and submission in the ContextCollector. 
func (c *ContextCollector) Run() { if !c.Running { c.StopChan = make(chan bool) c.StopCounterChan = make(chan bool) c.StoppedCounterChan = make(chan bool) go c.runCounter() c.Running = true } } // Stop stops the metrics collection and submission in the ContextCollector. func (c *ContextCollector) Stop(stoppedChan chan bool) { if c.Running { close(c.StopCounterChan) <-c.StoppedCounterChan c.StoppedChan = stoppedChan close(c.StopChan) c.Running = false } } // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. func (c *ContextCollector) SubmitStats(sc *util.PerformanceStatsEncoder) { c.StatsEncoder = sc } fever-1.0.8/processing/context_collector_test.go000066400000000000000000000051541353566242200221400ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "encoding/json" "fmt" "math/rand" "reflect" "testing" "time" "github.com/DCSO/fever/types" log "github.com/sirupsen/logrus" ) func makeCCTestEvent(eType, flowID string) types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.%d.%d.%d", rand.Intn(250), rand.Intn(250), rand.Intn(250)), SrcPort: []int64{1, 2, 3, 4, 5}[rand.Intn(5)], DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(250)), DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: eType, Proto: "TCP", FlowID: flowID, } jsonBytes, _ := json.Marshal(e) e.JSONLine = string(jsonBytes) return e } func TestContextCollector(t *testing.T) { markedVals := make(map[string][]string) seenMarked := make(map[string][]string) dsub := func(entries Context, logger *log.Entry) error { for _, v := range entries { var parsed struct { FlowID string } err := json.Unmarshal([]byte(v), &parsed) if err != nil { t.Fatal(err) } seenMarked[parsed.FlowID] = append(seenMarked[parsed.FlowID], v) } return nil } cc := MakeContextCollector(dsub, 5*time.Minute) nofReports := 0 for i := 0; i < 10000; i++ { isMarked := (rand.Intn(20) < 1) flowID := 
fmt.Sprintf("%d", rand.Intn(10000000)+10000) if isMarked { nofReports++ cc.Mark(flowID) } for j := 0; j < rand.Intn(200)+1; j++ { ev := makeCCTestEvent([]string{"http", "smb", "dns"}[rand.Intn(3)], flowID) if isMarked { markedVals[flowID] = append(markedVals[flowID], ev.JSONLine) } cc.Consume(&ev) } ev := makeCCTestEvent("flow", flowID) cc.Consume(&ev) } if len(markedVals) != len(seenMarked) { t.Fatalf("number of marked flows (%d) != number of results (%d)", len(markedVals), len(seenMarked)) } if !reflect.DeepEqual(markedVals, seenMarked) { t.Fatal("contents of results and recorded metadata maps differ") } } func TestContextCollectorMissingFlowID(t *testing.T) { e := types.Entry{ Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "stats", } jsonBytes, _ := json.Marshal(e) e.JSONLine = string(jsonBytes) count := 0 dsub := func(entries Context, logger *log.Entry) error { count++ return nil } cc := MakeContextCollector(dsub, 5*time.Minute) cc.Consume(&e) if count != 0 { t.Fatalf("event with empty flow ID was considered") } flowID := "12345" cc.Mark(flowID) ev := makeCCTestEvent("dns", flowID) cc.Consume(&ev) ev = makeCCTestEvent("flow", flowID) cc.Consume(&ev) if count != 1 { t.Fatalf("wrong number of entries: %d", count) } } fever-1.0.8/processing/context_shipper_amqp.go000066400000000000000000000034301353566242200215760ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "encoding/json" "time" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) const ( // ContextQueueLength is the length of the queue buffering incoming context // bundles to balance out potential transmission delays. ContextQueueLength = 100 ) // ContextChunk represents a collection of events for transmission via AMQP. 
type ContextChunk struct { Timestamp time.Time `json:"timestamp"` SensorID string `json:"sensor_id"` Events []interface{} `json:"events"` } // ContextShipperAMQP is a ContextShipper that sends incoming context bundles to // an AMQP exchange. type ContextShipperAMQP struct { Submitter util.StatsSubmitter InChan chan Context SensorID string } // Start initiates the concurrent handling of incoming context bundles in the // Shipper's input channel. It will stop automatically once this channel is // closed. func (cs *ContextShipperAMQP) Start(s util.StatsSubmitter) (chan<- Context, error) { var err error cs.Submitter = s cs.InChan = make(chan Context, ContextQueueLength) cs.SensorID, err = util.GetSensorID() if err != nil { return nil, err } go func() { for ctx := range cs.InChan { out := make([]interface{}, 0) for _, ctxItem := range ctx { var myItem interface{} err := json.Unmarshal([]byte(ctxItem), &myItem) if err != nil { log.Warnf("could not marshal event JSON: %s", string(ctxItem)) continue } out = append(out, myItem) } chunk := ContextChunk{ Timestamp: time.Now(), SensorID: cs.SensorID, Events: out, } json, err := json.Marshal(chunk) if err != nil { log.Warn(err) continue } s.Submit(json, "context", "application/json") } }() return cs.InChan, nil } fever-1.0.8/processing/context_shipper_amqp_test.go000066400000000000000000000052651353566242200226450ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "strings" "testing" "time" "github.com/DCSO/fever/util" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqptest" "github.com/NeowayLabs/wabbit/amqptest/server" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" ) func TestContextShipperAMQP(t *testing.T) { serverURL := "amqp://sensor:sensor@localhost:9988/%2f/" log.SetLevel(log.DebugLevel) // start mock server fakeServer := server.NewServer(serverURL) fakeServer.Start() // set up consumer allDone := make(chan bool) coll := 
make([]string, 0) c, err := util.NewConsumer(serverURL, "context", "direct", "context", "context", "foo-test1", func(d wabbit.Delivery) { coll = append(coll, string(d.Body())) if len(coll) == 4 { allDone <- true } }) if err != nil { t.Fatal(err) } // set up submitter submitter, err := util.MakeAMQPSubmitterWithReconnector(serverURL, "context", true, func(url string) (wabbit.Conn, error) { // we pass in a custom reconnector which uses the amqptest implementation var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } cs := &ContextShipperAMQP{} inChan, err := cs.Start(submitter) if err != nil { t.Fatal(err) } inChan <- Context{`{"value":"c1"}`} inChan <- Context{`{"value":"c2"}`} inChan <- Context{`{"value":"c3"}`} inChan <- Context{`{"value":"c4"}`} // ... and wait until they are received and processed <-allDone // check if output is correct if len(coll) != 4 { t.Fail() } if !strings.Contains(coll[0], `"value":"c1"`) { t.Fatalf("value 1 incorrect: %v", coll[0]) } if !strings.Contains(coll[1], `"value":"c2"`) { t.Fatalf("value 2 incorrect: %v", coll[1]) } if !strings.Contains(coll[2], `"value":"c3"`) { t.Fatalf("value 3 incorrect: %v", coll[2]) } if !strings.Contains(coll[3], `"value":"c4"`) { t.Fatalf("value 4 incorrect: %v", coll[3]) } close(inChan) // tear down test setup submitter.Finish() fakeServer.Stop() c.Shutdown() } func TestContextShipperAMQPBrokenJSON(t *testing.T) { cs := &ContextShipperAMQP{} ds, _ := util.MakeDummySubmitter() inChan, err := cs.Start(ds) if err != nil { t.Fatal(err) } hook := test.NewGlobal() var entries []*log.Entry inChan <- Context{`{""value":1}`} for i := 0; i < 60; i++ { time.Sleep(1 * time.Second) entries = hook.AllEntries() if len(entries) > 0 { break } if i > 58 { t.Fatalf("timed out trying to receive error message for malformed JSON") } } close(inChan) if entries[0].Message != `could not marshal event JSON: {""value":1}` { t.Fatalf("wrong error message: %v", entries[0].Message) } 
} fever-1.0.8/processing/dns_aggregator.go000066400000000000000000000140061353566242200203310ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bytes" "encoding/json" "os" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // DNSAggregatorPerfStats contains performance stats written to InfluxDB // for monitoring. type DNSAggregatorPerfStats struct { DNSAggregateRawCount uint64 `influx:"dns_aggregate_raw_count"` DNSAggregateCount uint64 `influx:"dns_aggregate_count"` } // AggregateDNSReplyDetails holds data for a query tuple. type AggregateDNSReplyDetails struct { Rrtype string `json:"rrtype,omitempty"` Rdata string `json:"rdata,omitempty"` Rcode string `json:"rcode,omitempty"` Type string `json:"type,omitempty"` } // AggregatedDNSDetails holds summarized traffic stats for a given // AggregateDNSEvent. type AggregatedDNSDetails struct { Rrname string `json:"rrname,omitempty"` Details []AggregateDNSReplyDetails `json:"rdata,omitempty"` } // AggregateDNSEvent holds aggregated flow data. type AggregateDNSEvent struct { Timestamp []string `json:"timestamp"` EventType string `json:"event_type"` SrcIP []string `json:"src_ip,omitempty"` SrcPort []int `json:"src_port,omitempty"` DestIP []string `json:"dest_ip,omitempty"` DestPort int `json:"dest_port,omitempty"` DNS AggregatedDNSDetails `json:"dns,omitempty"` } // DNSAggregator is an aggregator that groups DNS events with the same // domain name. type DNSAggregator struct { SensorID string Count int64 DNSMutex sync.RWMutex DNS map[string]*AggregateDNSEvent PerfStats DNSAggregatorPerfStats StatsEncoder *util.PerformanceStatsEncoder SrcIPSet map[string]bool DestIPSet map[string]bool AnswerSet map[string]bool StringBuf bytes.Buffer FlushPeriod time.Duration DatabaseOutChan chan types.Entry CloseChan chan bool ClosedChan chan bool Logger *log.Entry } // MakeDNSAggregator creates a new empty DNSAggregator. 
func MakeDNSAggregator(flushPeriod time.Duration, outChan chan types.Entry) *DNSAggregator { a := &DNSAggregator{ FlushPeriod: flushPeriod, Logger: log.WithFields(log.Fields{ "domain": "dns_aggregate", }), DNS: make(map[string]*AggregateDNSEvent), SrcIPSet: make(map[string]bool), DestIPSet: make(map[string]bool), AnswerSet: make(map[string]bool), DatabaseOutChan: outChan, CloseChan: make(chan bool), ClosedChan: make(chan bool), } a.SensorID, _ = os.Hostname() return a } func (a *DNSAggregator) flush() { // reset live counters a.DNSMutex.Lock() myDNS := a.DNS myCount := a.Count a.DNS = make(map[string]*AggregateDNSEvent) a.SrcIPSet = make(map[string]bool) a.DestIPSet = make(map[string]bool) a.AnswerSet = make(map[string]bool) a.Count = 0 a.PerfStats.DNSAggregateCount = uint64(len(myDNS)) a.PerfStats.DNSAggregateRawCount = uint64(myCount) a.DNSMutex.Unlock() if a.StatsEncoder != nil { a.StatsEncoder.Submit(a.PerfStats) } a.Logger.WithFields(log.Fields{ "agg_dns": a.PerfStats.DNSAggregateCount, "in_dns": a.PerfStats.DNSAggregateRawCount, }).Info("flushing events") for _, v := range myDNS { jsonString, _ := json.Marshal(v) // log.Info(string(jsonString)) newEntry := types.Entry{ Timestamp: v.Timestamp[0], EventType: v.EventType, JSONLine: string(jsonString[:]), } a.DatabaseOutChan <- newEntry } } func (a *DNSAggregator) countRequest(key string, e *types.Entry) { a.DNSMutex.Lock() a.Count++ if _, ok := a.DNS[key]; !ok { a.DNS[key] = &AggregateDNSEvent{ Timestamp: []string{e.Timestamp}, EventType: "dns", SrcIP: []string{e.SrcIP}, SrcPort: []int{int(e.SrcPort)}, DestIP: []string{e.DestIP}, DestPort: int(e.DestPort), DNS: AggregatedDNSDetails{ Rrname: e.DNSRRName, Details: []AggregateDNSReplyDetails{ AggregateDNSReplyDetails{ Rrtype: e.DNSRRType, Rdata: e.DNSRData, Rcode: e.DNSRCode, Type: e.DNSType, }, }, }, } } else { req := a.DNS[key] req.SrcPort = append(req.SrcPort, int(e.SrcPort)) if _, ok := a.SrcIPSet[e.SrcIP]; !ok { req.SrcIP = append(req.SrcIP, e.SrcIP) 
a.SrcIPSet[e.SrcIP] = true } if _, ok := a.DestIPSet[e.DestIP]; !ok { req.DestIP = append(req.DestIP, e.DestIP) a.DestIPSet[e.DestIP] = true } a.StringBuf.Write([]byte(e.DNSRRType)) a.StringBuf.Write([]byte(e.DNSRData)) a.StringBuf.Write([]byte(e.DNSRCode)) a.StringBuf.Write([]byte(e.DNSType)) if _, ok = a.AnswerSet[a.StringBuf.String()]; !ok { req.DNS.Details = append(req.DNS.Details, AggregateDNSReplyDetails{ Rrtype: e.DNSRRType, Rdata: e.DNSRData, Rcode: e.DNSRCode, Type: e.DNSType, }) } a.StringBuf.Reset() } a.DNSMutex.Unlock() } // Consume processes an Entry, adding the data within to the internal // aggregated state func (a *DNSAggregator) Consume(e *types.Entry) error { a.countRequest(e.DNSRRName, e) return nil } // Run starts the background aggregation service for this handler func (a *DNSAggregator) Run() { go func() { i := 0 * time.Second for { select { case <-a.CloseChan: close(a.ClosedChan) return default: if i >= a.FlushPeriod { a.flush() i = 0 * time.Second } time.Sleep(1 * time.Second) i += 1 * time.Second } } }() } // Stop causes the aggregator to cease aggregating and submitting data func (a *DNSAggregator) Stop(stopChan chan bool) { close(a.CloseChan) <-a.ClosedChan close(stopChan) } // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 
func (a *DNSAggregator) SubmitStats(sc *util.PerformanceStatsEncoder) { a.StatsEncoder = sc } // GetName returns the name of the handler func (a *DNSAggregator) GetName() string { return "DB DNS aggregator" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *DNSAggregator) GetEventTypes() []string { return []string{"dns"} } fever-1.0.8/processing/dns_aggregator_test.go000066400000000000000000000072321353566242200213730ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "bytes" "encoding/json" "fmt" "math/rand" "sync" "testing" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" ) const ( numTestEvents = 100000 ) func makeDNSEvent() types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(5)+1), SrcPort: 53, DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "DNS", Proto: "TCP", DNSRCode: []string{"NOERROR", "NXDOMAIN"}[rand.Intn(2)], DNSRData: fmt.Sprintf("10.%d.0.%d", rand.Intn(50), rand.Intn(50)+100), DNSRRName: fmt.Sprintf("%s.com", util.RandStringBytesMaskImprSrc(4)), DNSRRType: "answer", } return e } func TestDNSAggregator(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) outChan := make(chan types.Entry) consumeWaitChan := make(chan bool) closeChan := make(chan bool) f := MakeDNSAggregator(1*time.Second, outChan) daTypes := f.GetEventTypes() if len(daTypes) != 1 { t.Fatal("DNS aggregation handler should only claim one type") } if daTypes[0] != "dns" { t.Fatal("DNS aggregation handler should only claim 'dns' type") } if f.GetName() != "DB DNS aggregator" { t.Fatal("DNS aggregation handler has wrong name") } var observedLock sync.Mutex observedSituations := make(map[string]int) observedDomains := make(map[string]bool) setupSituations := make(map[string]int) setupDomains := 
make(map[string]bool) go func() { var buf bytes.Buffer for { select { case e := <-outChan: var out AggregateDNSEvent err := json.Unmarshal([]byte(e.JSONLine), &out) if err != nil { t.Fail() } for _, v := range out.DNS.Details { buf.Write([]byte(out.DNS.Rrname)) buf.Write([]byte(v.Rrtype)) buf.Write([]byte(v.Rdata)) buf.Write([]byte(v.Rcode)) observedLock.Lock() observedSituations[buf.String()]++ observedLock.Unlock() observedDomains[out.DNS.Rrname] = true buf.Reset() } case <-closeChan: close(consumeWaitChan) return } } }() f.Run() for i := 0; i < numTestEvents; i++ { var buf bytes.Buffer ev := makeDNSEvent() buf.Write([]byte(ev.DNSRRName)) buf.Write([]byte(ev.DNSRRType)) buf.Write([]byte(ev.DNSRData)) buf.Write([]byte(ev.DNSRCode)) setupSituations[buf.String()]++ setupDomains[ev.DNSRRName] = true buf.Reset() f.Consume(&ev) } go func() { for { observedLock.Lock() if len(setupSituations) <= len(observedSituations) { observedLock.Unlock() break } observedLock.Unlock() time.Sleep(100 * time.Millisecond) } close(closeChan) }() <-consumeWaitChan close(outChan) waitChan := make(chan bool) f.Stop(waitChan) <-waitChan if len(setupSituations) != len(observedSituations) { t.Fatalf("results have different dimensions: %d/%d", len(setupSituations), len(observedSituations)) } for k, v := range setupSituations { if _, ok := observedSituations[k]; !ok { t.Fatalf("missing key: %s", k) } v2 := observedSituations[k] if v2 != v { t.Fatalf("mismatching counts for key %s: %d/%d", k, v, v2) } } for k, v := range observedSituations { if _, ok := setupSituations[k]; !ok { t.Fatalf("missing key: %s", k) } v2 := setupSituations[k] if v2 != v { t.Fatalf("mismatching counts for key %s: %d/%d", k, v, v2) } } if len(setupDomains) != len(observedDomains) { t.Fatalf("results have different dimensions: %d/%d", len(setupDomains), len(observedDomains)) } for k := range observedDomains { if _, ok := setupDomains[k]; !ok { t.Fatalf("missing key: %s", k) } } } 
fever-1.0.8/processing/event_profiler.go000066400000000000000000000076761353566242200204050ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2018, DCSO GmbH import ( "bytes" "fmt" "os" "os/exec" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // EventProfile contains counts per event_type such as occurrences and // JSON size. type EventProfile struct { CountMap map[string]uint64 SizeMap map[string]uint64 } // EventProfiler counts EVE event type statistics, such as number and size // of JSON data received from the input. type EventProfiler struct { SensorID string Host string Profile EventProfile FlushPeriod time.Duration ProfileMutex sync.Mutex CloseChan chan bool ClosedChan chan bool Logger *log.Entry Submitter util.StatsSubmitter SubmitChannel chan []byte } func getFQDN() (fqdn string) { cmd := exec.Command("/bin/hostname", "-f") var out bytes.Buffer cmd.Stdout = &out err := cmd.Run() if err != nil { log.Warn(err) host, err := os.Hostname() if err != nil { return "unknown" } return host } fqdn = out.String() if len(fqdn) > 1 { fqdn = fqdn[:len(fqdn)-1] } else { fqdn = "unknown" } return fqdn } // MakeEventProfiler creates a new EventProfiler. 
func MakeEventProfiler(flushPeriod time.Duration, submitter util.StatsSubmitter) (*EventProfiler, error) { sensorID, err := util.GetSensorID() if err != nil { return nil, err } a := &EventProfiler{ FlushPeriod: flushPeriod, Logger: log.WithFields(log.Fields{ "domain": "eventprofiler", }), Profile: EventProfile{ CountMap: make(map[string]uint64), SizeMap: make(map[string]uint64), }, CloseChan: make(chan bool), ClosedChan: make(chan bool), SubmitChannel: make(chan []byte, 60), Submitter: submitter, SensorID: sensorID, } a.SensorID, _ = os.Hostname() a.Host = getFQDN() return a, nil } func (a *EventProfiler) formatLineProtocol() string { out := "" a.ProfileMutex.Lock() myProfile := a.Profile first := true for k, v := range myProfile.SizeMap { if !first { out += "," } else { first = false } out += fmt.Sprintf("size.%s=%d", k, v) } for k, v := range myProfile.CountMap { out += fmt.Sprintf(",count.%s=%d", k, v) } a.ProfileMutex.Unlock() if out == "" { return "" } return fmt.Sprintf("%s,host=%s %s %d", util.ToolName, a.Host, out, uint64(time.Now().UnixNano())) } func (a *EventProfiler) flush() { lineString := a.formatLineProtocol() if lineString == "" { return } select { case a.SubmitChannel <- []byte(lineString): break default: log.Warning("channel is full, cannot submit message...") } } // Consume processes an Entry, adding the data within to the internal // aggregated state func (a *EventProfiler) Consume(e *types.Entry) error { etype := e.EventType a.ProfileMutex.Lock() a.Profile.CountMap[etype]++ a.Profile.SizeMap[etype] += uint64(len(e.JSONLine)) a.ProfileMutex.Unlock() return nil } // Run starts the background aggregation service for this handler func (a *EventProfiler) Run() { go func() { for message := range a.SubmitChannel { a.Submitter.SubmitWithHeaders(message, "", "text/plain", map[string]string{ "database": "telegraf", "retention_policy": "default", }) } }() go func() { i := 0 * time.Second for { select { case <-a.CloseChan: close(a.SubmitChannel) 
close(a.ClosedChan) return default: if i >= a.FlushPeriod { a.flush() i = 0 * time.Second } time.Sleep(1 * time.Second) i += 1 * time.Second } } }() } // Stop causes the aggregator to cease aggregating and submitting data func (a *EventProfiler) Stop(stopChan chan bool) { close(a.CloseChan) <-a.ClosedChan close(stopChan) } // GetName returns the name of the handler func (a *EventProfiler) GetName() string { return "Event profiler" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *EventProfiler) GetEventTypes() []string { return []string{"*"} } fever-1.0.8/processing/flow_aggregator.go000066400000000000000000000123311353566242200205130ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bytes" "encoding/json" "os" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // FlowAggregatorPerfStats contains performance stats written to InfluxDB // for monitoring. type FlowAggregatorPerfStats struct { FlowAggregateRawCount uint64 `influx:"flow_aggregate_raw_count"` FlowAggregateCount uint64 `influx:"flow_aggregate_count"` } // AggregatedFlowDetails holds summarized traffic stats for a given // AggregateFlowEvent. type AggregatedFlowDetails struct { PktsToserver int64 `json:"pkts_toserver"` PktsToclient int64 `json:"pkts_toclient"` BytesToserver int64 `json:"bytes_toserver"` BytesToclient int64 `json:"bytes_toclient"` } // AggregateFlowEvent holds aggregated flow data. type AggregateFlowEvent struct { Timestamp []string `json:"timestamp"` EventType string `json:"event_type"` SrcIP string `json:"src_ip,omitempty"` SrcPort []int `json:"src_port,omitempty"` DestIP string `json:"dest_ip,omitempty"` DestPort int `json:"dest_port,omitempty"` Flow AggregatedFlowDetails `json:"flow,omitempty"` } // FlowAggregator is an aggregator that groups flows with the same combination // of srcIP/destIP/destPort. 
type FlowAggregator struct { SensorID string Count int64 FlowsMutex sync.RWMutex Flows map[string]*AggregateFlowEvent PerfStats FlowAggregatorPerfStats StatsEncoder *util.PerformanceStatsEncoder FlushPeriod time.Duration StringBuf bytes.Buffer DatabaseOutChan chan types.Entry CloseChan chan bool ClosedChan chan bool Logger *log.Entry } // MakeFlowAggregator creates a new empty FlowAggregator. func MakeFlowAggregator(flushPeriod time.Duration, outChan chan types.Entry) *FlowAggregator { a := &FlowAggregator{ FlushPeriod: flushPeriod, Logger: log.WithFields(log.Fields{ "domain": "flow_aggregate", }), Flows: make(map[string]*AggregateFlowEvent), DatabaseOutChan: outChan, CloseChan: make(chan bool), ClosedChan: make(chan bool), } a.SensorID, _ = os.Hostname() return a } func (a *FlowAggregator) flush() { a.FlowsMutex.Lock() myFlows := a.Flows myCount := a.Count a.Flows = make(map[string]*AggregateFlowEvent) a.Count = 0 a.PerfStats.FlowAggregateRawCount = uint64(myCount) a.PerfStats.FlowAggregateCount = uint64(len(myFlows)) a.FlowsMutex.Unlock() if a.StatsEncoder != nil { a.StatsEncoder.Submit(a.PerfStats) } a.Logger.WithFields(log.Fields{ "agg_flows": a.PerfStats.FlowAggregateCount, "in_flows": a.PerfStats.FlowAggregateRawCount, }).Info("flushing events") for _, v := range myFlows { jsonString, _ := json.Marshal(v) newEntry := types.Entry{ SrcIP: v.SrcIP, SrcPort: int64(v.SrcPort[0]), DestIP: v.DestIP, DestPort: int64(v.DestPort), Timestamp: v.Timestamp[0], EventType: v.EventType, JSONLine: string(jsonString[:]), } a.DatabaseOutChan <- newEntry } } func (a *FlowAggregator) countFlow(key string, e *types.Entry) { a.FlowsMutex.Lock() a.Count++ if _, ok := a.Flows[key]; !ok { a.Flows[key] = &AggregateFlowEvent{ Timestamp: []string{e.Timestamp}, EventType: "flow", SrcIP: e.SrcIP, SrcPort: []int{int(e.SrcPort)}, DestIP: e.DestIP, DestPort: int(e.DestPort), Flow: AggregatedFlowDetails{ PktsToserver: e.PktsToServer, PktsToclient: e.PktsToClient, BytesToserver: 
e.BytesToServer, BytesToclient: e.BytesToClient, }, } } else { flow := a.Flows[key] flow.SrcPort = append(flow.SrcPort, int(e.SrcPort)) flow.Flow.PktsToserver += e.PktsToServer flow.Flow.PktsToclient += e.PktsToClient flow.Flow.BytesToserver += e.BytesToServer flow.Flow.BytesToclient += e.BytesToClient } a.FlowsMutex.Unlock() } // Consume processes an Entry, adding the data within to the internal // aggregated state func (a *FlowAggregator) Consume(e *types.Entry) error { a.StringBuf.Write([]byte(e.SrcIP)) a.StringBuf.Write([]byte(e.DestIP)) a.StringBuf.Write([]byte(string(e.DestPort))) a.countFlow(a.StringBuf.String(), e) a.StringBuf.Reset() return nil } // Run starts the background aggregation service for this handler func (a *FlowAggregator) Run() { go func() { i := 0 * time.Second for { select { case <-a.CloseChan: close(a.ClosedChan) return default: if i >= a.FlushPeriod { a.flush() i = 0 * time.Second } time.Sleep(1 * time.Second) i += 1 * time.Second } } }() } // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 
func (a *FlowAggregator) SubmitStats(sc *util.PerformanceStatsEncoder) { a.StatsEncoder = sc } // Stop causes the aggregator to cease aggregating and submitting data func (a *FlowAggregator) Stop(stopChan chan bool) { close(a.CloseChan) <-a.ClosedChan close(stopChan) } // GetName returns the name of the handler func (a *FlowAggregator) GetName() string { return "DB flow aggregator" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *FlowAggregator) GetEventTypes() []string { return []string{"flow"} } fever-1.0.8/processing/flow_aggregator_test.go000066400000000000000000000144351353566242200215610ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "encoding/json" "fmt" "math/rand" "sync" "testing" "time" "github.com/DCSO/fever/types" ) const ( numOfTestFlowItems = 200000 ) func makeFlowEvent() types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(250)), SrcPort: []int64{1, 2, 3, 4, 5}[rand.Intn(5)], DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(250)), DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "flow", Proto: "TCP", BytesToClient: int64(rand.Intn(10000)), BytesToServer: int64(rand.Intn(10000)), PktsToClient: int64(rand.Intn(100)), PktsToServer: int64(rand.Intn(100)), } jsonBytes, _ := json.Marshal(e) e.JSONLine = string(jsonBytes) return e } func TestFlowAggregator(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) outChan := make(chan types.Entry) feedWaitChan := make(chan bool) closeChan := make(chan bool) f := MakeFlowAggregator(1*time.Second, outChan) var procFlowsLock sync.Mutex var processedFlows int var eTotalPktsToClient int64 var eTotalPktsToServer int64 var eTotalBytesToClient int64 var eTotalBytesToServer int64 var rTotalPktsToClient int64 var rTotalPktsToServer int64 var rTotalBytesToClient int64 var rTotalBytesToServer int64 go func(pc 
*int64, ps *int64, bc *int64, bs *int64) { for { select { case e := <-outChan: var out struct { SrcPort []int `json:"src_port"` Flow struct { BytesToServer int64 `json:"bytes_toserver"` BytesToClient int64 `json:"bytes_toclient"` PktsToServer int64 `json:"pkts_toserver"` PktsToClient int64 `json:"pkts_toclient"` } `json:"flow"` } err := json.Unmarshal([]byte(e.JSONLine), &out) if err != nil { t.Fail() } // we count the source ports to determine the number of // aggregated flows procFlowsLock.Lock() processedFlows += len(out.SrcPort) procFlowsLock.Unlock() *bc += out.Flow.BytesToClient *bs += out.Flow.BytesToServer *pc += out.Flow.PktsToClient *ps += out.Flow.PktsToServer case <-closeChan: close(feedWaitChan) return } } }(&rTotalPktsToClient, &rTotalPktsToServer, &rTotalBytesToClient, &rTotalBytesToServer) f.Run() for i := 0; i < numOfTestFlowItems; i++ { ev := makeFlowEvent() eTotalBytesToClient += ev.BytesToClient eTotalBytesToServer += ev.BytesToServer eTotalPktsToClient += ev.PktsToClient eTotalPktsToServer += ev.PktsToServer f.Consume(&ev) } go func() { for { procFlowsLock.Lock() if processedFlows == numOfTestFlowItems { procFlowsLock.Unlock() break } procFlowsLock.Unlock() time.Sleep(100 * time.Millisecond) } close(closeChan) }() <-feedWaitChan consumeWaitChan := make(chan bool) f.Stop(consumeWaitChan) <-consumeWaitChan if eTotalBytesToClient != rTotalBytesToClient { t.Fatalf("total bytes to client differ: %d/%d", eTotalBytesToClient, rTotalBytesToClient) } if eTotalBytesToServer != rTotalBytesToServer { t.Fatalf("total bytes to server differ: %d/%d", eTotalBytesToServer, rTotalBytesToServer) } if eTotalPktsToClient != rTotalPktsToClient { t.Fatalf("total pkts to client differ: %d/%d", eTotalPktsToClient, rTotalPktsToClient) } if eTotalPktsToServer != rTotalPktsToServer { t.Fatalf("total pkts to server differ: %d/%d", eTotalPktsToServer, rTotalPktsToServer) } } func TestFlowAggregatorWithDispatch(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) outChan 
:= make(chan types.Entry) dbChan := make(chan types.Entry, numOfTestFlowItems) feedWaitChan := make(chan bool) closeChan := make(chan bool) f := MakeFlowAggregator(1*time.Second, outChan) var procFlowsLock sync.Mutex var processedFlows int var eTotalPktsToClient int64 var eTotalPktsToServer int64 var eTotalBytesToClient int64 var eTotalBytesToServer int64 var rTotalPktsToClient int64 var rTotalPktsToServer int64 var rTotalBytesToClient int64 var rTotalBytesToServer int64 go func(pc *int64, ps *int64, bc *int64, bs *int64) { for { select { case e := <-outChan: var out struct { SrcPort []int `json:"src_port"` Flow struct { BytesToServer int64 `json:"bytes_toserver"` BytesToClient int64 `json:"bytes_toclient"` PktsToServer int64 `json:"pkts_toserver"` PktsToClient int64 `json:"pkts_toclient"` } `json:"flow"` } err := json.Unmarshal([]byte(e.JSONLine), &out) if err != nil { t.Fail() } // we count the source ports to determine the number of // aggregated flows procFlowsLock.Lock() processedFlows += len(out.SrcPort) procFlowsLock.Unlock() *bc += out.Flow.BytesToClient *bs += out.Flow.BytesToServer *pc += out.Flow.PktsToClient *ps += out.Flow.PktsToServer case <-closeChan: close(feedWaitChan) return } } }(&rTotalPktsToClient, &rTotalPktsToServer, &rTotalBytesToClient, &rTotalBytesToServer) d := MakeHandlerDispatcher(dbChan) d.RegisterHandler(f) f.Run() for i := 0; i < numOfTestFlowItems; i++ { ev := makeFlowEvent() eTotalBytesToClient += ev.BytesToClient eTotalBytesToServer += ev.BytesToServer eTotalPktsToClient += ev.PktsToClient eTotalPktsToServer += ev.PktsToServer d.Dispatch(&ev) } go func() { for { procFlowsLock.Lock() if processedFlows == numOfTestFlowItems { procFlowsLock.Unlock() break } procFlowsLock.Unlock() time.Sleep(100 * time.Millisecond) } close(closeChan) }() <-feedWaitChan consumeWaitChan := make(chan bool) f.Stop(consumeWaitChan) <-consumeWaitChan if len(dbChan) != numOfTestFlowItems { t.Fatalf("not all input events forwarded: %d", len(dbChan)) } 
close(dbChan) if eTotalBytesToClient != rTotalBytesToClient { t.Fatalf("total bytes to client differ: %d/%d", eTotalBytesToClient, rTotalBytesToClient) } if eTotalBytesToServer != rTotalBytesToServer { t.Fatalf("total bytes to server differ: %d/%d", eTotalBytesToServer, rTotalBytesToServer) } if eTotalPktsToClient != rTotalPktsToClient { t.Fatalf("total pkts to client differ: %d/%d", eTotalPktsToClient, rTotalPktsToClient) } if eTotalPktsToServer != rTotalPktsToServer { t.Fatalf("total pkts to server differ: %d/%d", eTotalPktsToServer, rTotalPktsToServer) } } fever-1.0.8/processing/flow_extractor.go000066400000000000000000000072641353566242200204150ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bytes" "os" "strings" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/DCSO/bloom" log "github.com/sirupsen/logrus" ) // FlowExtractor is an aggregator that extracts the flows from // "hosts of interest" and sends them to the backend. type FlowExtractor struct { SensorID string BloomPath string BloomFilter *bloom.BloomFilter FlowsMutex sync.RWMutex flowCount int Flows *bytes.Buffer SubmitChannel chan []byte Submitter util.StatsSubmitter FlushPeriod time.Duration FlushCount int CloseChan chan bool ClosedChan chan bool Logger *log.Entry } // MakeFlowExtractor creates a new empty FlowExtractor. 
func MakeFlowExtractor(flushPeriod time.Duration, flushCount int, bloomPath string, submitter util.StatsSubmitter) (*FlowExtractor, error) { var bloomFilter *bloom.BloomFilter if bloomPath != "" { compressed := false if strings.HasSuffix(bloomPath, ".gz") { compressed = true } var err error bloomFilter, err = bloom.LoadFilter(bloomPath, compressed) if err != nil { return nil, err } } fe := &FlowExtractor{ FlushPeriod: flushPeriod, Submitter: submitter, BloomPath: bloomPath, Logger: log.WithFields(log.Fields{ "domain": "flow_extractor", }), Flows: new(bytes.Buffer), SubmitChannel: make(chan []byte, 60), BloomFilter: bloomFilter, CloseChan: make(chan bool), ClosedChan: make(chan bool), FlushCount: flushCount, flowCount: 0, } fe.SensorID, _ = os.Hostname() return fe, nil } func (fe *FlowExtractor) flush() { fe.FlowsMutex.Lock() myFlows := fe.Flows fe.Flows = new(bytes.Buffer) fe.flowCount = 0 fe.FlowsMutex.Unlock() select { case fe.SubmitChannel <- myFlows.Bytes(): break default: log.Warning("Flow channel is full, cannot submit message...") } } // Consume processes an Entry, adding the data within to the flows func (fe *FlowExtractor) Consume(e *types.Entry) error { fe.FlowsMutex.Lock() defer fe.FlowsMutex.Unlock() if fe.BloomFilter != nil { if !fe.BloomFilter.Check([]byte(e.SrcIP)) && !fe.BloomFilter.Check([]byte(e.DestIP)) { return nil } } var fev types.FlowEvent err := fev.FromEntry(e) if err != nil { return err } err = fev.Marshal(fe.Flows) fe.flowCount++ return err } // Run starts the background aggregation service for this handler func (fe *FlowExtractor) Run() { //this goroutine asynchronously submit flow messages go func() { for message := range fe.SubmitChannel { fe.Submitter.Submit(message, "", "application/binary-flows") } }() //this go routine takes care of flushing the flows go func() { i := 0 * time.Second interval := 100 * time.Millisecond for { select { case <-fe.CloseChan: close(fe.SubmitChannel) close(fe.ClosedChan) return default: //we flush if the 
flush period has passed, or if the count //of events is larger then the flush count fe.FlowsMutex.Lock() flowCount := fe.flowCount fe.FlowsMutex.Unlock() if i >= fe.FlushPeriod || flowCount > fe.FlushCount { fe.flush() i = 0 * time.Second } time.Sleep(interval) i += interval } } }() } // Stop causes the aggregator to cease aggregating and submitting data func (fe *FlowExtractor) Stop(stopChan chan bool) { close(fe.CloseChan) <-fe.ClosedChan close(stopChan) } // GetName returns the name of the handler func (fe *FlowExtractor) GetName() string { return "Flow extractor" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (fe *FlowExtractor) GetEventTypes() []string { return []string{"flow"} } fever-1.0.8/processing/flow_extractor_test.go000066400000000000000000000115651353566242200214530ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqptest" "github.com/NeowayLabs/wabbit/amqptest/server" "github.com/DCSO/bloom" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "bytes" "fmt" "math/rand" "reflect" "sync" "testing" "time" ) const ( numFlowExtractorEvents = 100000 ) func makeFlowExtractorEvent(ipv6 bool) types.Entry { protos := []string{"TCP", "UDP"} n := rand.Int() % len(protos) var srcIP, destIP string if !ipv6 { srcIP = fmt.Sprintf("10.0.0.%d", rand.Intn(50)) destIP = fmt.Sprintf("10.0.0.%d", rand.Intn(50)) } else { srcIP = fmt.Sprintf("2001:0db8:85a3:0000:0000:8a2e:0370:7334") destIP = fmt.Sprintf("2001:0db8:85a3:0000:0000:8a2e:0370:7334") } e := types.Entry{ SrcIP: srcIP, SrcPort: []int64{1, 2, 3, 4, 5}[rand.Intn(5)], DestIP: destIP, DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "flow", Proto: protos[n], BytesToClient: int64(rand.Intn(10000)), BytesToServer: int64(rand.Intn(10000)), PktsToClient: 
int64(rand.Intn(100)), PktsToServer: int64(rand.Intn(100)), } return e } func makeBloomFilter() *bloom.BloomFilter { bf := bloom.Initialize(10000, 1e-10) for i := 0; i < 10000; i++ { bf.Add([]byte(fmt.Sprintf("10.0.0.%d", rand.Intn(50)))) } bf.Add([]byte("2001:0db8:85a3:0000:0000:8a2e:0370:7334")) return &bf } func TestFlowExtractor(t *testing.T) { serverURL := "amqp://sensor:sensor@127.0.0.1:11111/%2f/" // start mock AMQP server fakeServer := server.NewServer(serverURL) fakeServer.Start() defer fakeServer.Stop() // set up consumer results := make([]string, 0) var resultsLock sync.Mutex c, err := util.NewConsumer(serverURL, "tdh.flows", "direct", "tdh.flows.testqueue", "", "", func(d wabbit.Delivery) { resultsLock.Lock() results = append(results, string(d.Body())) resultsLock.Unlock() }) if err != nil { t.Fatal(err) } defer c.Shutdown() // set up submitter submitter, err := util.MakeAMQPSubmitterWithReconnector(serverURL, "tdh.flows", true, func(url string) (wabbit.Conn, error) { var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } defer submitter.Finish() mla, err := MakeFlowExtractor(1*time.Second, 100, "", submitter) mla.BloomFilter = makeBloomFilter() if err != nil { t.Fatal(err) } mla.Run() expectedFlows := make([]types.Entry, 0) for i := 0; i < numFlowExtractorEvents; i++ { ipv6 := false //we mix in some IPv6 packets... 
if rand.Intn(2) == 0 { ipv6 = true } ev := makeFlowExtractorEvent(ipv6) err := mla.Consume(&ev) if err != nil { t.Fatal(err) } if mla.BloomFilter.Check([]byte(ev.SrcIP)) || mla.BloomFilter.Check([]byte(ev.DestIP)) { expectedFlows = append(expectedFlows, ev) } } var flows []types.FlowEvent CheckLoop: for { flows = make([]types.FlowEvent, 0) resultsLock.Lock() for i := range results { result := results[i] buffer := bytes.NewBufferString(result) for { var fe types.FlowEvent err := fe.Unmarshal(buffer) if err != nil { break } flows = append(flows, fe) if len(flows) == len(expectedFlows) { break CheckLoop } } } resultsLock.Unlock() time.Sleep(100 * time.Millisecond) } stopChan := make(chan bool) mla.Stop(stopChan) <-stopChan if len(flows) != len(expectedFlows) { t.Fatalf("Error: Expected %d flows, got %d!", len(expectedFlows), len(flows)) } for i := range flows { flow := flows[i] expectedEntry := expectedFlows[i] var expectedFlow types.FlowEvent expectedFlow.FromEntry(&expectedEntry) if !reflect.DeepEqual(flow, expectedFlow) { t.Errorf("Flows do not match!") if flow.Format != expectedFlow.Format { t.Errorf("Formats do not match!") } if flow.Timestamp != expectedFlow.Timestamp { t.Errorf("Timestamps do not match!") } if !bytes.Equal(flow.SrcIP, expectedFlow.SrcIP) { t.Errorf("Source IPs do not match!") } if !bytes.Equal(flow.DestIP, expectedFlow.DestIP) { t.Errorf("Destination IPs do not match!") } if flow.SrcPort != expectedFlow.SrcPort { t.Errorf("Source Ports do not match!") } if flow.DestPort != expectedFlow.DestPort { t.Errorf("Destination Ports do not match!") } if flow.Flags != expectedFlow.Flags { t.Errorf("Flags do not match!") } if flow.BytesToServer != expectedFlow.BytesToServer { t.Errorf("BytesToServer do not match!") } if flow.BytesToClient != expectedFlow.BytesToClient { t.Errorf("BytesToClient do not match!") } if flow.PktsToServer != expectedFlow.PktsToServer { t.Errorf("PktsToServer do not match!") } if flow.PktsToClient != expectedFlow.PktsToClient { 
t.Errorf("PktsToClient do not match!") } } } } fever-1.0.8/processing/forward_handler.go000066400000000000000000000152731353566242200205130ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "encoding/json" "net" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // ForwardHandlerPerfStats contains performance stats written to InfluxDB // for monitoring. type ForwardHandlerPerfStats struct { ForwardedPerSec uint64 `influx:"forwarded_events_per_sec"` } // ForwardHandler is a handler that processes events by writing their JSON // representation into a UNIX socket. This is limited by a list of allowed // event types to be forwarded. type ForwardHandler struct { Logger *log.Entry DoRDNS bool RDNSHandler *RDNSHandler ContextCollector *ContextCollector ForwardEventChan chan []byte OutputSocket string OutputConn net.Conn Reconnecting bool ReconnLock sync.Mutex ReconnectNotifyChan chan bool StopReconnectChan chan bool ReconnectTimes int PerfStats ForwardHandlerPerfStats StatsEncoder *util.PerformanceStatsEncoder StopChan chan bool StoppedChan chan bool StopCounterChan chan bool StoppedCounterChan chan bool Running bool Lock sync.Mutex } func (fh *ForwardHandler) reconnectForward() { for range fh.ReconnectNotifyChan { var i int log.Info("Reconnecting to forwarding socket...") outputConn, myerror := net.Dial("unix", fh.OutputSocket) fh.ReconnLock.Lock() if !fh.Reconnecting { fh.Reconnecting = true } else { fh.ReconnLock.Unlock() continue } fh.ReconnLock.Unlock() for i = 0; (fh.ReconnectTimes == 0 || i < fh.ReconnectTimes) && myerror != nil; i++ { select { case <-fh.StopReconnectChan: return default: log.WithFields(log.Fields{ "domain": "forward", "retry": i + 1, "maxretries": fh.ReconnectTimes, }).Warnf("error connecting to output socket, retrying: %s", myerror) time.Sleep(10 * time.Second) outputConn, myerror = net.Dial("unix", fh.OutputSocket) } } if myerror 
!= nil { log.WithFields(log.Fields{ "domain": "forward", "retries": i, }).Fatalf("permanent error connecting to output socket: %s", myerror) } else { if i > 0 { log.WithFields(log.Fields{ "domain": "forward", "retry_attempts": i, }).Infof("connection to output socket successful") } fh.Lock.Lock() fh.OutputConn = outputConn fh.Lock.Unlock() fh.ReconnLock.Lock() fh.Reconnecting = false fh.ReconnLock.Unlock() } } } func (fh *ForwardHandler) runForward() { var err error for { select { case <-fh.StopChan: close(fh.StoppedChan) return default: for item := range fh.ForwardEventChan { select { case <-fh.StopChan: close(fh.StoppedChan) return default: fh.ReconnLock.Lock() if fh.Reconnecting { fh.ReconnLock.Unlock() continue } fh.ReconnLock.Unlock() fh.Lock.Lock() if fh.OutputConn != nil { _, err = fh.OutputConn.Write(item) if err != nil { fh.OutputConn.Close() log.Warn(err) fh.ReconnectNotifyChan <- true fh.Lock.Unlock() continue } _, err = fh.OutputConn.Write([]byte("\n")) if err != nil { fh.OutputConn.Close() log.Warn(err) fh.Lock.Unlock() continue } } fh.Lock.Unlock() } } } } } func (fh *ForwardHandler) runCounter() { sTime := time.Now() for { time.Sleep(500 * time.Millisecond) select { case <-fh.StopCounterChan: close(fh.StoppedCounterChan) return default: if fh.StatsEncoder == nil || time.Since(sTime) < fh.StatsEncoder.SubmitPeriod { continue } fh.Lock.Lock() fh.PerfStats.ForwardedPerSec /= uint64(fh.StatsEncoder.SubmitPeriod.Seconds()) fh.StatsEncoder.Submit(fh.PerfStats) fh.PerfStats.ForwardedPerSec = 0 sTime = time.Now() fh.Lock.Unlock() } } } // MakeForwardHandler creates a new forwarding handler func MakeForwardHandler(reconnectTimes int, outputSocket string) *ForwardHandler { fh := &ForwardHandler{ Logger: log.WithFields(log.Fields{ "domain": "forward", }), OutputSocket: outputSocket, ReconnectTimes: reconnectTimes, ReconnectNotifyChan: make(chan bool), StopReconnectChan: make(chan bool), } return fh } // Consume processes an Entry and forwards it func (fh 
*ForwardHandler) Consume(e *types.Entry) error { doForwardThis := util.ForwardAllEvents || util.AllowType(e.EventType) if doForwardThis { var ev types.EveOutEvent err := json.Unmarshal([]byte(e.JSONLine), &ev) if err != nil { return err } if GlobalContextCollector != nil && e.EventType == types.EventTypeAlert { GlobalContextCollector.Mark(string(e.FlowID)) } if fh.DoRDNS && fh.RDNSHandler != nil { err = fh.RDNSHandler.Consume(e) if err != nil { return err } ev.SrcHost = e.SrcHosts ev.DestHost = e.DestHosts } var jsonCopy []byte jsonCopy, err = json.Marshal(ev) if err != nil { return err } fh.ForwardEventChan <- jsonCopy fh.Lock.Lock() fh.PerfStats.ForwardedPerSec++ fh.Lock.Unlock() } return nil } // GetName returns the name of the handler func (fh *ForwardHandler) GetName() string { return "Forwarding handler" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (fh *ForwardHandler) GetEventTypes() []string { if util.ForwardAllEvents { return []string{"*"} } return util.GetAllowedTypes() } // EnableRDNS switches on reverse DNS enrichment for source and destination // IPs in outgoing EVE events. 
func (fh *ForwardHandler) EnableRDNS(expiryPeriod time.Duration) { fh.DoRDNS = true fh.RDNSHandler = MakeRDNSHandler(util.NewHostNamer(expiryPeriod, 2*expiryPeriod)) } // Run starts forwarding of JSON representations of all consumed events func (fh *ForwardHandler) Run() { if !fh.Running { fh.StopChan = make(chan bool) fh.ForwardEventChan = make(chan []byte, 10000) fh.StopCounterChan = make(chan bool) fh.StoppedCounterChan = make(chan bool) go fh.reconnectForward() fh.ReconnectNotifyChan <- true go fh.runForward() go fh.runCounter() fh.Running = true } } // Stop stops forwarding of JSON representations of all consumed events func (fh *ForwardHandler) Stop(stoppedChan chan bool) { if fh.Running { close(fh.StopCounterChan) <-fh.StoppedCounterChan fh.StoppedChan = stoppedChan fh.Lock.Lock() fh.OutputConn.Close() fh.Lock.Unlock() close(fh.StopReconnectChan) close(fh.StopChan) close(fh.ForwardEventChan) fh.Running = false } } // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. 
func (fh *ForwardHandler) SubmitStats(sc *util.PerformanceStatsEncoder) { fh.StatsEncoder = sc } fever-1.0.8/processing/forward_handler_test.go000066400000000000000000000132171353566242200215460ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "bufio" "encoding/json" "fmt" "io" "io/ioutil" "math/rand" "net" "os" "path/filepath" "sync" "testing" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) func makeEvent(eType string, tag string) types.Entry { e := types.Entry{ SrcIP: fmt.Sprintf("10.0.0.%d", rand.Intn(5)+1), SrcPort: 53, DestIP: fmt.Sprintf("10.0.0.%d", rand.Intn(50)), DestPort: []int64{11, 12, 13, 14, 15}[rand.Intn(5)], Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: eType, Proto: "TCP", } eve := types.EveEvent{ EventType: e.EventType, SrcIP: e.SrcIP, SrcPort: int(e.SrcPort), DestIP: e.DestIP, DestPort: int(e.DestPort), Proto: e.Proto, DNS: &types.DNSEvent{ Rrname: tag, }, } json, err := json.Marshal(eve) if err != nil { log.Warn(err) } else { e.JSONLine = string(json) } return e } func consumeSocket(inputListener net.Listener, stopChan chan bool, stoppedChan chan bool, t *testing.T, coll *[]string, wg *sync.WaitGroup) { for { select { case <-stopChan: close(stoppedChan) return default: var conn net.Conn inputListener.(*net.UnixListener).SetDeadline(time.Now().Add(1e9)) conn, err := inputListener.Accept() if nil != err { if opErr, ok := err.(*net.OpError); ok && opErr.Timeout() { continue } t.Log(err) } reader := bufio.NewReaderSize(conn, 10485760) for { select { case <-stopChan: inputListener.Close() close(stoppedChan) return default: line, isPrefix, rerr := reader.ReadLine() if rerr == nil || rerr != io.EOF { if isPrefix { t.Log("incomplete line read from input") continue } else { *coll = append(*coll, string(line)) wg.Done() } } } } } } } func TestForwardHandler(t *testing.T) { util.PrepareEventFilter([]string{"alert"}, 
false) dir, err := ioutil.TempDir("", "test") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) inputListener, err := net.Listen("unix", tmpfn) if err != nil { t.Fatal("error opening input socket:", err) } defer inputListener.Close() // prepare slice to hold collected strings coll := make([]string, 0) // setup comms channels clCh := make(chan bool) cldCh := make(chan bool) // start socket consumer var wg sync.WaitGroup wg.Add(2) go consumeSocket(inputListener, clCh, cldCh, t, &coll, &wg) // start forwarding handler fh := MakeForwardHandler(5, tmpfn) fh.Run() fhTypes := fh.GetEventTypes() if len(fhTypes) != 1 { t.Fatal("Forwarding handler should only claim one type") } if fhTypes[0] != "alert" { t.Fatal("Forwarding handler should claim 'alert' type") } if fh.GetName() != "Forwarding handler" { t.Fatal("Forwarding handler has wrong name") } e := makeEvent("alert", "foo1") fh.Consume(&e) e = makeEvent("http", "foo2") fh.Consume(&e) e = makeEvent("alert", "foo3") fh.Consume(&e) // stop forwarding handler scChan := make(chan bool) fh.Stop(scChan) <-scChan // wait for socket consumer to receive all wg.Wait() if len(coll) != 2 { t.Fatalf("unexpected number of alerts: %d != 2", len(coll)) } var eve types.EveOutEvent err = json.Unmarshal([]byte(coll[0]), &eve) if err != nil { t.Fatal(err) } if eve.DNS.Rrname != "foo1" { t.Fatalf("invalid event data, expected 'foo1', got %s", eve.DNS.Rrname) } err = json.Unmarshal([]byte(coll[1]), &eve) if err != nil { t.Fatal(err) } if eve.DNS.Rrname != "foo3" { t.Fatalf("invalid event data, expected 'foo3', got %s", eve.DNS.Rrname) } } func TestForwardAllHandler(t *testing.T) { util.PrepareEventFilter([]string{}, true) dir, err := ioutil.TempDir("", "test") if err != nil { t.Fatal(err) } defer os.RemoveAll(dir) tmpfn := filepath.Join(dir, fmt.Sprintf("t%d", rand.Int63())) inputListener, err := net.Listen("unix", tmpfn) if err != nil { t.Fatal("error opening input socket:", 
err) } defer inputListener.Close() // prepare slice to hold collected strings coll := make([]string, 0) // setup comms channels clCh := make(chan bool) cldCh := make(chan bool) // start socket consumer var wg sync.WaitGroup wg.Add(3) go consumeSocket(inputListener, clCh, cldCh, t, &coll, &wg) // start forwarding handler fh := MakeForwardHandler(5, tmpfn) fh.Run() fhTypes := fh.GetEventTypes() if len(fhTypes) != 1 { t.Fatal("Forwarding handler should only claim one type") } if fhTypes[0] != "*" { t.Fatal("Forwarding handler should claim '*' type") } if fh.GetName() != "Forwarding handler" { t.Fatal("Forwarding handler has wrong name") } e := makeEvent("alert", "foo1") fh.Consume(&e) e = makeEvent("http", "foo2") fh.Consume(&e) e = makeEvent("alert", "foo3") fh.Consume(&e) // stop forwarding handler scChan := make(chan bool) fh.Stop(scChan) <-scChan // stop socket consumer inputListener.Close() close(clCh) wg.Wait() if len(coll) != 3 { t.Fatalf("unexpected number of alerts: %d != 3", len(coll)) } var eve types.EveOutEvent err = json.Unmarshal([]byte(coll[0]), &eve) if err != nil { t.Fatal(err) } if eve.DNS.Rrname != "foo1" { t.Fatalf("invalid event data, expected 'foo1', got %s", eve.DNS.Rrname) } err = json.Unmarshal([]byte(coll[1]), &eve) if err != nil { t.Fatal(err) } if eve.DNS.Rrname != "foo2" { t.Fatalf("invalid event data, expected 'foo2', got %s", eve.DNS.Rrname) } err = json.Unmarshal([]byte(coll[2]), &eve) if err != nil { t.Fatal(err) } if eve.DNS.Rrname != "foo3" { t.Fatalf("invalid event data, expected 'foo3', got %s", eve.DNS.Rrname) } } fever-1.0.8/processing/handler.go000066400000000000000000000016201353566242200167560ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" ) // Handler is an interface describing the behaviour for a component to // handle events parsed from EVE input. 
type Handler interface { GetEventTypes() []string GetName() string Consume(*types.Entry) error } // ConcurrentHandler is an interface describing the behaviour for a component to // handle events parsed from EVE input, while concurrently performing other // actions, such as collecting, integrating and/or forwarding data. type ConcurrentHandler interface { Handler Run() Stop(chan bool) } // StatsGeneratingHandler is an interface describing a Handler which also // periodically outputs performance statistics using the provided // PerformanceStatsEncoder. type StatsGeneratingHandler interface { Handler SubmitStats(*util.PerformanceStatsEncoder) } fever-1.0.8/processing/handler_dispatcher.go000066400000000000000000000105101353566242200211620ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2018, DCSO GmbH import ( "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // HandlerDispatcherPerfStats contains performance stats written to InfluxDB // for monitoring. type HandlerDispatcherPerfStats struct { DispatchedPerSec uint64 `influx:"dispatch_calls_per_sec"` } // HandlerDispatcher is a component to collect and properly apply a set of // Handlers to a stream of Entry objects. Handlers can register the event types // they are meant to act on and are called with relevant Entries to perform // their job. type HandlerDispatcher struct { Lock sync.Mutex DispatchMap map[string]([]Handler) DBHandler Handler PerfStats HandlerDispatcherPerfStats Logger *log.Entry StatsEncoder *util.PerformanceStatsEncoder StopCounterChan chan bool StoppedCounterChan chan bool } // DBHandler writes consumed events to a database. 
type DBHandler struct { OutChan chan types.Entry } // GetName just returns the name of the default handler func (h *DBHandler) GetName() string { return "Default handler" } // GetEventTypes here is a dummy method -- since this handler is never // registered we don't need to set this to an actual event type func (h *DBHandler) GetEventTypes() []string { return []string{"not applicable"} } // Consume simply emits ths consumed entry on the default output channel func (h *DBHandler) Consume(e *types.Entry) error { h.OutChan <- *e return nil } func (ad *HandlerDispatcher) runCounter() { sTime := time.Now() for { time.Sleep(500 * time.Millisecond) select { case <-ad.StopCounterChan: close(ad.StoppedCounterChan) return default: if ad.StatsEncoder == nil || time.Since(sTime) < ad.StatsEncoder.SubmitPeriod { continue } ad.Lock.Lock() ad.PerfStats.DispatchedPerSec /= uint64(ad.StatsEncoder.SubmitPeriod.Seconds()) ad.StatsEncoder.Submit(ad.PerfStats) ad.PerfStats.DispatchedPerSec = 0 sTime = time.Now() ad.Lock.Unlock() } } } // MakeHandlerDispatcher returns a new HandlerDispatcher. The channel passed // as an argument is used as an output channel for the default handler, which // simply forwards events to a given channel (for example to be written to a // database) func MakeHandlerDispatcher(databaseOut chan types.Entry) *HandlerDispatcher { ad := &HandlerDispatcher{ DispatchMap: make(map[string]([]Handler)), Logger: log.WithFields(log.Fields{ "domain": "dispatch", }), } if databaseOut != nil { ad.DBHandler = &DBHandler{ OutChan: databaseOut, } } ad.Logger.WithFields(log.Fields{ "type": "*", "name": "default handler", }).Debugf("event handler added") return ad } // RegisterHandler adds the given Handler to the set of callbacks to be // called on the relevant Entries received by the dispatcher. 
func (ad *HandlerDispatcher) RegisterHandler(agg Handler) { eventTypes := agg.GetEventTypes() for _, eventType := range eventTypes { if _, ok := ad.DispatchMap[eventType]; !ok { ad.DispatchMap[eventType] = make([]Handler, 0) } ad.DispatchMap[eventType] = append(ad.DispatchMap[eventType], agg) ad.Logger.WithFields(log.Fields{ "type": eventType, "name": agg.GetName(), }).Info("event handler added") } } // Dispatch applies the set of handlers currently registered in the dispatcher // to the Entry object passed to it. func (ad *HandlerDispatcher) Dispatch(e *types.Entry) { if _, ok := ad.DispatchMap[e.EventType]; ok { for _, agg := range ad.DispatchMap[e.EventType] { agg.Consume(e) } } if a, ok := ad.DispatchMap["*"]; ok { for _, agg := range a { agg.Consume(e) } } if ad.DBHandler != nil { ad.DBHandler.Consume(e) } ad.Lock.Lock() ad.PerfStats.DispatchedPerSec++ ad.Lock.Unlock() } // SubmitStats registers a PerformanceStatsEncoder for runtime stats submission. func (ad *HandlerDispatcher) SubmitStats(sc *util.PerformanceStatsEncoder) { ad.StatsEncoder = sc } // Run starts the background service for this handler func (ad *HandlerDispatcher) Run() { ad.StopCounterChan = make(chan bool) ad.StoppedCounterChan = make(chan bool) go ad.runCounter() } // Stop causes the handler to cease counting and submitting data func (ad *HandlerDispatcher) Stop(stopChan chan bool) { close(ad.StopCounterChan) <-ad.StoppedCounterChan close(stopChan) } fever-1.0.8/processing/handler_dispatcher_test.go000066400000000000000000000111321353566242200222220ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "fmt" "math/rand" "regexp" "testing" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqptest" "github.com/NeowayLabs/wabbit/amqptest/server" ) type Test1Handler struct { Vals []string } func (h *Test1Handler) GetName() string { return "Test handler 1" } func (h 
*Test1Handler) GetEventTypes() []string { return []string{"dns"} } func (h *Test1Handler) Consume(e *types.Entry) error { h.Vals = append(h.Vals, e.JSONLine) return nil } type Test2Handler struct { Vals []string } func (h *Test2Handler) GetName() string { return "Test handler 2" } func (h *Test2Handler) GetEventTypes() []string { return []string{"http"} } func (h *Test2Handler) Consume(e *types.Entry) error { h.Vals = append(h.Vals, e.JSONLine) return nil } func TestHandlerDispatcherExampleHandler(t *testing.T) { outChan := make(chan types.Entry) closeChan := make(chan bool) defaultSelection := make([]string, 0) go func(closeChan chan bool, inChan chan types.Entry) { for v := range inChan { defaultSelection = append(defaultSelection, v.JSONLine) } close(closeChan) }(closeChan, outChan) ad := MakeHandlerDispatcher(outChan) t1 := &Test1Handler{ Vals: make([]string, 0), } ad.RegisterHandler(t1) t2 := &Test2Handler{ Vals: make([]string, 0), } ad.RegisterHandler(t2) rand.Seed(time.Now().UTC().UnixNano()) // make test entries typestrs := []string{"http", "dns", "flow", "foo"} var createdEntries [10000]types.Entry entries := make(map[string]([]string)) for i := 0; i < 10000; i++ { myIdentifier := fmt.Sprintf("val%d", i) myType := typestrs[rand.Intn(len(typestrs))] createdEntries[i] = types.Entry{ EventType: myType, JSONLine: myIdentifier, } if _, ok := entries[myType]; !ok { entries[myType] = make([]string, 0) } entries[myType] = append(entries[myType], myIdentifier) ad.Dispatch(&createdEntries[i]) } close(outChan) <-closeChan if len(t1.Vals) != len(entries["dns"]) { t.Fatalf("wrong number of 'dns' entries delivered to DNS handler (%d/%d)", len(t1.Vals), len(entries["dns"])) } for i := 0; i < len(t1.Vals); i++ { if t1.Vals[i] != entries["dns"][i] { t.Fatalf("'dns' pair of entries differs: %s/%s", t1.Vals[i], entries["dns"][i]) } } if len(t2.Vals) != len(entries["http"]) { t.Fatalf("wrong number of 'http' entries delivered to HTTP handler (%d/%d)", len(t2.Vals), 
len(entries["http"])) } for i := 0; i < len(t2.Vals); i++ { if t2.Vals[i] != entries["http"][i] { t.Fatalf("'http' pair of entries differs: %s/%s", t2.Vals[i], entries["http"][i]) } } } func TestHandlerDispatcherMonitoring(t *testing.T) { serverURL := "amqp://sensor:sensor@127.0.0.1:9999/%2f/" // start mock AMQP server fakeServer := server.NewServer(serverURL) fakeServer.Start() defer fakeServer.Stop() // set up consumer results := make([]string, 0) c, err := util.NewConsumer(serverURL, "nsm.test.metrics", "direct", "nsm.test.metrics.testqueue", "", "", func(d wabbit.Delivery) { results = append(results, string(d.Body())) }) if err != nil { t.Fatal(err) } // set up submitter statssubmitter, err := util.MakeAMQPSubmitterWithReconnector(serverURL, "nsm.test.metrics", true, func(url string) (wabbit.Conn, error) { // we pass in a custom reconnector which uses the amqptest implementation var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } defer statssubmitter.Finish() // create InfluxDB line protocol encoder/submitter pse := util.MakePerformanceStatsEncoder(statssubmitter, 2*time.Second, false) outChan := make(chan types.Entry) closeChan := make(chan bool) ad := MakeHandlerDispatcher(outChan) ad.SubmitStats(pse) ad.Run() go func() { for i := 0; i < 100; i++ { ad.Dispatch(&types.Entry{ JSONLine: "foo", }) ad.Dispatch(&types.Entry{ JSONLine: "bar", }) ad.Dispatch(&types.Entry{ JSONLine: "baz", }) time.Sleep(50 * time.Millisecond) } }() go func(closeChan chan bool, inChan chan types.Entry) { i := 0 for v := range inChan { _ = v i++ if i == 300 { break } } close(closeChan) }(closeChan, outChan) <-closeChan close(outChan) stopChan := make(chan bool) ad.Stop(stopChan) <-stopChan c.Shutdown() if len(results) == 0 { t.Fatalf("unexpected result length: 0") } if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ dispatch_calls_per_sec=[0-9]+", util.ToolName), []byte(results[0])); !match { t.Fatalf("unexpected match content: %s", 
results[0]) } } fever-1.0.8/processing/ip_handler.go000066400000000000000000000121561353566242200174540ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2018, DCSO GmbH import ( "bufio" "encoding/json" "fmt" "net" "os" "sync" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" "github.com/yl2chen/cidranger" ) // MakeIPAlertEntryForHit returns an alert Entry as raised by an external // IP hit. The resulting alert will retain // the triggering event's metadata as well as // its timestamp. func MakeIPAlertEntryForHit(e types.Entry, matchedIP string, rangerEntry cidranger.RangerEntry, alertPrefix string) types.Entry { var eve types.EveEvent var newEve types.EveEvent var err = json.Unmarshal([]byte(e.JSONLine), &eve) if err != nil { log.Warn(err, e.JSONLine) } else { sig := "%s Communication involving IP %s in listed range %s" matchedNet := rangerEntry.Network() matchedNetString := matchedNet.String() newEve = types.EveEvent{ EventType: "alert", Alert: &types.AlertEvent{ Action: "allowed", Category: "Potentially Bad Traffic", Signature: fmt.Sprintf(sig, alertPrefix, matchedIP, matchedNetString), }, Stream: eve.Stream, InIface: eve.InIface, SrcIP: eve.SrcIP, SrcPort: eve.SrcPort, DestIP: eve.DestIP, DestPort: eve.DestPort, Proto: eve.Proto, TxID: eve.TxID, Timestamp: eve.Timestamp, PacketInfo: eve.PacketInfo, HTTP: eve.HTTP, DNS: eve.DNS, Flow: eve.Flow, SMTP: eve.SMTP, SSH: eve.SSH, Email: eve.Email, TLS: eve.TLS, } } newEntry := e json, err := json.Marshal(newEve) if err != nil { log.Warn(err) } else { newEntry.JSONLine = string(json) } newEntry.EventType = "alert" return newEntry } // IPHandler is a Handler which is meant to check for the presence of // event type-specific keywords in a Bloom filter, raising new 'alert' type // events when matches are found. 
type IPHandler struct { sync.Mutex Logger *log.Entry Name string EventType string Ranger cidranger.Ranger IPListFilename string DatabaseEventChan chan types.Entry ForwardHandler Handler DoForwardAlert bool AlertPrefix string } // MakeIPHandler returns a new IPHandler, checking against the given // IP ranges and sending alerts to databaseChan as well as forwarding them // to a given forwarding handler. func MakeIPHandler(ranger cidranger.Ranger, databaseChan chan types.Entry, forwardHandler Handler, alertPrefix string) *IPHandler { bh := &IPHandler{ Logger: log.WithFields(log.Fields{ "domain": "ip-blacklist", }), Ranger: ranger, DatabaseEventChan: databaseChan, ForwardHandler: forwardHandler, DoForwardAlert: (util.ForwardAllEvents || util.AllowType("alert")), AlertPrefix: alertPrefix, } log.WithFields(log.Fields{}).Info("IP range list loaded") return bh } func rangerFromFile(IPListFilename string) (cidranger.Ranger, error) { inFile, err := os.Open(IPListFilename) if err != nil { return nil, err } defer inFile.Close() ranger := cidranger.NewPCTrieRanger() scanner := bufio.NewScanner(inFile) scanner.Split(bufio.ScanLines) for scanner.Scan() { lineText := scanner.Text() _, network, err := net.ParseCIDR(lineText) if err != nil { log.Warnf("invalid IP range %s, skipping", lineText) } else { log.Debugf("adding IP range %s", lineText) ranger.Insert(cidranger.NewBasicRangerEntry(*network)) } } return ranger, nil } // MakeIPHandlerFromFile returns a new IPHandler created from a new // IP range list specified by the given file name. func MakeIPHandlerFromFile(IPListFilename string, databaseChan chan types.Entry, forwardHandler Handler, alertPrefix string) (*IPHandler, error) { ranger, err := rangerFromFile(IPListFilename) if err != nil { return nil, err } ih := MakeIPHandler(ranger, databaseChan, forwardHandler, alertPrefix) ih.IPListFilename = IPListFilename return ih, nil } // Reload triggers a reload of the contents of the IP list file. 
func (a *IPHandler) Reload() error { ranger, err := rangerFromFile(a.IPListFilename) if err != nil { return err } a.Lock() a.Ranger = ranger a.Unlock() return nil } // Consume processes an Entry, emitting alerts if there is a match func (a *IPHandler) Consume(e *types.Entry) error { a.Lock() srcRanges, err := a.Ranger.ContainingNetworks(net.ParseIP(e.SrcIP)) if err != nil { log.Warn(err) } for _, v := range srcRanges { n := MakeIPAlertEntryForHit(*e, e.SrcIP, v, a.AlertPrefix) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } dstRanges, err := a.Ranger.ContainingNetworks(net.ParseIP(e.DestIP)) if err != nil { log.Warn(err) } for _, v := range dstRanges { n := MakeIPAlertEntryForHit(*e, e.DestIP, v, a.AlertPrefix) a.DatabaseEventChan <- n a.ForwardHandler.Consume(&n) } a.Unlock() return nil } // GetName returns the name of the handler func (a *IPHandler) GetName() string { return "IP blacklist handler" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *IPHandler) GetEventTypes() []string { return []string{"http", "dns", "tls", "smtp", "flow", "ssh", "tls", "smb"} } fever-1.0.8/processing/ip_handler_test.go000066400000000000000000000126361353566242200205160ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2018, DCSO GmbH import ( "bufio" "encoding/json" "io/ioutil" "math/rand" "net" "os" "regexp" "testing" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" "github.com/sirupsen/logrus/hooks/test" "github.com/yl2chen/cidranger" ) var ( reIPmsg = regexp.MustCompile(`Communication involving IP ([^ ]+) in listed range ([^ ]+)`) ) func makeIPHTTPEvent(srcip string, dstip string) types.Entry { e := types.Entry{ SrcIP: srcip, SrcPort: int64(rand.Intn(60000) + 1025), DestIP: dstip, DestPort: 80, Timestamp: time.Now().Format(types.SuricataTimestampFormat), EventType: "http", Proto: "TCP", HTTPHost: "http://foo.bar", HTTPUrl: "/baz", 
HTTPMethod: "GET", } eve := types.EveEvent{ EventType: e.EventType, SrcIP: e.SrcIP, SrcPort: int(e.SrcPort), DestIP: e.DestIP, DestPort: int(e.DestPort), Proto: e.Proto, HTTP: &types.HTTPEvent{ Hostname: e.HTTPHost, URL: e.HTTPUrl, }, } json, err := json.Marshal(eve) if err != nil { log.Warn(err) } else { e.JSONLine = string(json) } return e } // IPCollectorHandler gathers consumed alerts in a list type IPCollectorHandler struct { Entries []string } func (h *IPCollectorHandler) GetName() string { return "Collector handler" } func (h *IPCollectorHandler) GetEventTypes() []string { return []string{"alert"} } func (h *IPCollectorHandler) Consume(e *types.Entry) error { log.Info(e.JSONLine) match := reIPmsg.FindStringSubmatch(e.JSONLine) if match != nil { h.Entries = append(h.Entries, e.JSONLine) return nil } return nil } func TestIPHandler(t *testing.T) { // make sure that alerts are forwarded util.PrepareEventFilter([]string{"alert"}, false) // channel to receive events to be saved to database dbChan := make(chan types.Entry) // handler to receive forwarded events fwhandler := &IPCollectorHandler{ Entries: make([]string, 0), } // concurrently gather entries to be written to DB dbWritten := make([]types.Entry, 0) consumeWaitChan := make(chan bool) go func() { for e := range dbChan { dbWritten = append(dbWritten, e) } close(consumeWaitChan) }() // make test ranger _, network, _ := net.ParseCIDR("10.0.0.1/32") rng := cidranger.NewPCTrieRanger() rng.Insert(cidranger.NewBasicRangerEntry(*network)) ih := MakeIPHandler(rng, dbChan, fwhandler, "IPF") bhTypes := ih.GetEventTypes() if len(bhTypes) != 8 { t.Fatal("IP handler should claim eight types") } if ih.GetName() != "IP blacklist handler" { t.Fatal("IP handler has wrong name") } e := makeIPHTTPEvent("10.0.0.1", "10.0.0.2") ih.Consume(&e) e = makeIPHTTPEvent("10.0.0.3", "10.0.0.2") ih.Consume(&e) e = makeIPHTTPEvent("10.0.0.3", "10.0.0.1") ih.Consume(&e) // wait until all values have been collected close(dbChan) 
<-consumeWaitChan // check that we haven't missed anything if len(fwhandler.Entries) < 2 { t.Fatalf("expected %d forwarded BLF alerts, seen less (%d)", 2, len(fwhandler.Entries)) } } func TestIPHandlerFromFile(t *testing.T) { // make sure that alerts are forwarded util.PrepareEventFilter([]string{"alert"}, false) // channel to receive events to be saved to database dbChan := make(chan types.Entry) // handler to receive forwarded events fwhandler := &IPCollectorHandler{ Entries: make([]string, 0), } // concurrently gather entries to be written to DB dbWritten := make([]types.Entry, 0) consumeWaitChan := make(chan bool) go func() { for e := range dbChan { dbWritten = append(dbWritten, e) } close(consumeWaitChan) }() ipFile, err := ioutil.TempFile("", "ipexample") if err != nil { t.Fatal(err) } defer os.Remove(ipFile.Name()) w := bufio.NewWriter(ipFile) _, err = w.WriteString("10.0.0.1/32\n") if err != nil { t.Fatal(err) } w.Flush() ipFile.Close() ih, err := MakeIPHandlerFromFile(ipFile.Name(), dbChan, fwhandler, "IPF") if err != nil { t.Fatal(err) } bhTypes := ih.GetEventTypes() if len(bhTypes) != 8 { t.Fatal("IP handler should claim eight types") } if ih.GetName() != "IP blacklist handler" { t.Fatal("IP handler has wrong name") } e := makeIPHTTPEvent("10.0.0.1", "10.0.0.2") ih.Consume(&e) e = makeIPHTTPEvent("10.0.0.3", "10.0.0.2") ih.Consume(&e) e = makeIPHTTPEvent("10.0.0.3", "10.0.0.1") ih.Consume(&e) // wait until all values have been collected close(dbChan) <-consumeWaitChan // check that we haven't missed anything if len(fwhandler.Entries) < 2 { t.Fatalf("expected %d forwarded BLF alerts, seen less (%d)", 2, len(fwhandler.Entries)) } } func TestIPHandlerFromFileInvalidFormat(t *testing.T) { // channel to receive events to be saved to database dbChan := make(chan types.Entry) // handler to receive forwarded events fwhandler := &IPCollectorHandler{ Entries: make([]string, 0), } ipFile, err := ioutil.TempFile("", "invalidipexample") if err != nil { t.Fatal(err) } 
defer os.Remove(ipFile.Name()) w := bufio.NewWriter(ipFile) _, err = w.WriteString("10.0.0.1/3q5435\n") if err != nil { t.Fatal(err) } w.Flush() ipFile.Close() hook := test.NewGlobal() _, err = MakeIPHandlerFromFile(ipFile.Name(), dbChan, fwhandler, "IPF") if err != nil { t.Fatal(err) } entries := hook.AllEntries() if len(entries) < 2 { t.Fatal("missing log entries") } if entries[0].Message != "invalid IP range 10.0.0.1/3q5435, skipping" { t.Fatal("wrong log entry for invalid IP range") } } fever-1.0.8/processing/pdns_collector.go000066400000000000000000000154511353566242200203620ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "bytes" "encoding/json" "os" "sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // pDNSReplyDetails holds data for a DNS answer. type pDNSReplyDetails struct { AnsweringHost string `json:"answering_host,omitempty"` Rrtype string `json:"rrtype,omitempty"` Rdata string `json:"rdata,omitempty"` Rcode string `json:"rcode,omitempty"` Type string `json:"type,omitempty"` Count uint64 `json:"count,omitempty"` } // pDNSDetails holds summarized stats for a given domain name. type pDNSDetails struct { AnswerSet map[string]*pDNSReplyDetails `json:"-"` Details []pDNSReplyDetails `json:"rdata,omitempty"` } type pDNSEvent struct { TimestampFrom time.Time `json:"timestamp_start"` TimestampTo time.Time `json:"timestamp_end"` DNSDetails map[string]*pDNSDetails `json:"dns,omitempty"` SensorID string `json:"sensor_id,omitempty"` } // PDNSCollector extracts and aggregates DNS response data from // EVE events and sends them to the backend. type PDNSCollector struct { SensorID string Count int64 DNSMutex sync.RWMutex DNS pDNSEvent StringBuf bytes.Buffer FlushPeriod time.Duration CloseChan chan bool ClosedChan chan bool Logger *log.Entry Submitter util.StatsSubmitter SubmitChannel chan []byte } // MakePDNSCollector creates a new pDNSCollector. 
func MakePDNSCollector(flushPeriod time.Duration, submitter util.StatsSubmitter) (*PDNSCollector, error) { sensorID, err := util.GetSensorID() if err != nil { return nil, err } a := &PDNSCollector{ FlushPeriod: flushPeriod, Logger: log.WithFields(log.Fields{ "domain": "pdns", }), DNS: pDNSEvent{ TimestampFrom: time.Now().UTC(), SensorID: sensorID, DNSDetails: make(map[string]*pDNSDetails), }, CloseChan: make(chan bool), ClosedChan: make(chan bool), SubmitChannel: make(chan []byte, 60), Submitter: submitter, SensorID: sensorID, } a.SensorID, _ = os.Hostname() return a, nil } func (a *PDNSCollector) flush() { a.DNSMutex.Lock() myDNS := a.DNS myDNS.TimestampTo = time.Now().UTC() a.DNS = pDNSEvent{ TimestampFrom: time.Now().UTC(), SensorID: a.SensorID, DNSDetails: make(map[string]*pDNSDetails), } a.Count = 0 a.DNSMutex.Unlock() jsonString, myerror := json.MarshalIndent(myDNS, "", " ") if myerror == nil { select { case a.SubmitChannel <- jsonString: break default: log.Warning("pDNS channel is full, cannot submit message...") } } else { a.Logger.Warn("error marshaling JSON for passive DNS") } } func (a *PDNSCollector) countRequestV1(e *types.Entry) { a.DNSMutex.Lock() a.Count++ if e.DNSRRName == "" { a.DNSMutex.Unlock() return } key := e.DNSRRName a.StringBuf.Write([]byte(e.SrcIP)) a.StringBuf.Write([]byte(e.DNSRRType)) a.StringBuf.Write([]byte(e.DNSRData)) a.StringBuf.Write([]byte(e.DNSRCode)) a.StringBuf.Write([]byte(e.DNSType)) k := a.StringBuf.String() a.StringBuf.Reset() if _, ok := a.DNS.DNSDetails[key]; !ok { a.DNS.DNSDetails[key] = &pDNSDetails{ AnswerSet: make(map[string]*pDNSReplyDetails), Details: []pDNSReplyDetails{ pDNSReplyDetails{ AnsweringHost: e.SrcIP, Rrtype: e.DNSRRType, Rdata: e.DNSRData, Rcode: e.DNSRCode, Type: e.DNSType, Count: 1, }, }, } a.DNS.DNSDetails[key].AnswerSet[k] = &a.DNS.DNSDetails[key].Details[0] } else { as, ok := a.DNS.DNSDetails[key].AnswerSet[k] if !ok { newDetail := pDNSReplyDetails{ AnsweringHost: e.SrcIP, Rrtype: e.DNSRRType, 
Rdata: e.DNSRData, Rcode: e.DNSRCode, Type: e.DNSType, Count: 1, } a.DNS.DNSDetails[key].Details = append(a.DNS.DNSDetails[key].Details, newDetail) a.DNS.DNSDetails[key].AnswerSet[k] = &a.DNS.DNSDetails[key].Details[len(a.DNS.DNSDetails[key].AnswerSet)-1] } else { as.Count++ } } a.DNSMutex.Unlock() } func (a *PDNSCollector) countRequestV2(e *types.Entry) { a.DNSMutex.Lock() a.Count++ if e.DNSRRName == "" || len(e.DNSAnswers) == 0 { a.DNSMutex.Unlock() return } for _, v := range e.DNSAnswers { key := e.DNSRRName a.StringBuf.Write([]byte(e.SrcIP)) a.StringBuf.Write([]byte(v.DNSRRType)) a.StringBuf.Write([]byte(v.DNSRData)) a.StringBuf.Write([]byte(v.DNSRCode)) a.StringBuf.Write([]byte(v.DNSType)) k := a.StringBuf.String() a.StringBuf.Reset() if _, ok := a.DNS.DNSDetails[key]; !ok { a.DNS.DNSDetails[key] = &pDNSDetails{ AnswerSet: make(map[string]*pDNSReplyDetails), Details: []pDNSReplyDetails{ pDNSReplyDetails{ AnsweringHost: e.SrcIP, Rrtype: v.DNSRRType, Rdata: v.DNSRData, Rcode: v.DNSRCode, Type: v.DNSType, Count: 1, }, }, } a.DNS.DNSDetails[key].AnswerSet[k] = &a.DNS.DNSDetails[key].Details[0] } else { as, ok := a.DNS.DNSDetails[key].AnswerSet[k] if !ok { newDetail := pDNSReplyDetails{ AnsweringHost: e.SrcIP, Rrtype: v.DNSRRType, Rdata: v.DNSRData, Rcode: v.DNSRCode, Type: v.DNSType, Count: 1, } a.DNS.DNSDetails[key].Details = append(a.DNS.DNSDetails[key].Details, newDetail) a.DNS.DNSDetails[key].AnswerSet[k] = &a.DNS.DNSDetails[key].Details[len(a.DNS.DNSDetails[key].AnswerSet)-1] } else { as.Count++ } } } a.DNSMutex.Unlock() } // Consume processes an Entry, adding the data within to the internal // aggregated state func (a *PDNSCollector) Consume(e *types.Entry) error { if e.DNSType == "answer" { if e.DNSVersion == 2 { a.countRequestV2(e) } else { a.countRequestV1(e) } } return nil } // Run starts the background aggregation service for this handler func (a *PDNSCollector) Run() { go func() { for message := range a.SubmitChannel { a.Submitter.Submit(message, 
"pdns", "application/json") } }() go func() { i := 0 * time.Second for { select { case <-a.CloseChan: close(a.SubmitChannel) close(a.ClosedChan) return default: if i >= a.FlushPeriod { a.flush() i = 0 * time.Second } time.Sleep(1 * time.Second) i += 1 * time.Second } } }() } // Stop causes the aggregator to cease aggregating and submitting data func (a *PDNSCollector) Stop(stopChan chan bool) { close(a.CloseChan) <-a.ClosedChan close(stopChan) } // GetName returns the name of the handler func (a *PDNSCollector) GetName() string { return "passive DNS collector" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *PDNSCollector) GetEventTypes() []string { return []string{"dns"} } fever-1.0.8/processing/rdns_handler.go000066400000000000000000000047421353566242200200140ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "net" "sync" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" "github.com/yl2chen/cidranger" ) // RDNSHandler is a handler that enriches events with reverse DNS // information looked up on the sensor, for both source and destination // IP addresses. type RDNSHandler struct { sync.Mutex Logger *log.Entry HostNamer *util.HostNamer PrivateRanges cidranger.Ranger PrivateRangesOnly bool } // MakeRDNSHandler returns a new RDNSHandler, backed by the passed HostNamer. 
func MakeRDNSHandler(hn *util.HostNamer) *RDNSHandler { rh := &RDNSHandler{ Logger: log.WithFields(log.Fields{ "domain": "rdns", }), PrivateRanges: cidranger.NewPCTrieRanger(), HostNamer: hn, } for _, cidr := range []string{ "10.0.0.0/8", "172.16.0.0/12", "192.168.0.0/16", "fc00::/7", } { _, block, err := net.ParseCIDR(cidr) if err != nil { log.Fatalf("cannot parse fixed private IP range %v", cidr) } rh.PrivateRanges.Insert(cidranger.NewBasicRangerEntry(*block)) } return rh } // EnableOnlyPrivateIPRanges ensures that only private (RFC1918) IP ranges // are enriched func (a *RDNSHandler) EnableOnlyPrivateIPRanges() { a.PrivateRangesOnly = true } // Consume processes an Entry and enriches it func (a *RDNSHandler) Consume(e *types.Entry) error { var res []string var err error var isPrivate bool if e.SrcIP != "" { ip := net.ParseIP(e.SrcIP) if ip != nil { isPrivate, err = a.PrivateRanges.Contains(ip) if err != nil { return err } if !a.PrivateRangesOnly || isPrivate { res, err = a.HostNamer.GetHostname(e.SrcIP) if err == nil { e.SrcHosts = res } } } else { log.Error("IP not valid") } } if e.DestIP != "" { ip := net.ParseIP(e.DestIP) if ip != nil { isPrivate, err = a.PrivateRanges.Contains(ip) if err != nil { return err } if !a.PrivateRangesOnly || isPrivate { res, err = a.HostNamer.GetHostname(e.DestIP) if err == nil { e.DestHosts = res } } } else { log.Error("IP not valid") } } return nil } // GetName returns the name of the handler func (a *RDNSHandler) GetName() string { return "reverse DNS handler" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (a *RDNSHandler) GetEventTypes() []string { return []string{"http", "dns", "tls", "smtp", "flow", "ssh", "tls", "smb", "alert"} } fever-1.0.8/processing/unicorn_aggregator.go000066400000000000000000000131421353566242200212220ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bytes" "encoding/json" "os" "strconv" 
"sync" "time" "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // UnicornAggregate represents UNICORN relevant aggregated flow stats. type UnicornAggregate struct { SensorID string `json:"sensor-id"` TimestampStart time.Time `json:"time-start"` TimestampEnd time.Time `json:"time-end"` FlowTuples map[string](map[string]int64) `json:"tuples"` ProxyMap map[string](map[string]int64) `json:"proxy-map"` } // UnicornAggregator collects and updates an internal structure of flow // events grouped by route type UnicornAggregator struct { Logger *log.Entry Name string EventType string Aggregate UnicornAggregate Submitter util.StatsSubmitter DummyMode bool SubmitPeriod time.Duration CloseChan chan bool ClosedChan chan bool StringBuf bytes.Buffer UnicornTuplesMutex sync.RWMutex `json:"-"` UnicornProxyMapMutex sync.RWMutex `json:"-"` } // MakeUnicornAggregate creates a new empty UnicornAggregate object. func MakeUnicornAggregate() *UnicornAggregate { a := &UnicornAggregate{} a.SensorID, _ = os.Hostname() a.FlowTuples = make(map[string](map[string]int64)) a.ProxyMap = make(map[string](map[string]int64)) return a } // MakeUnicornAggregator creates a new empty UnicornAggregator object. 
func MakeUnicornAggregator(statsSubmitter util.StatsSubmitter, submitPeriod time.Duration, dummyMode bool) *UnicornAggregator {
	a := &UnicornAggregator{
		Logger: log.WithFields(log.Fields{
			"domain": "aggregate",
		}),
		Submitter:    statsSubmitter,
		DummyMode:    dummyMode,
		SubmitPeriod: submitPeriod,
		CloseChan:    make(chan bool),
		ClosedChan:   make(chan bool),
		Aggregate:    *MakeUnicornAggregate(),
	}
	return a
}

// start records the begin of an aggregation interval.
func (a *UnicornAggregator) start() {
	timestamp := time.Now()
	a.Logger.WithFields(log.Fields{
		"timestamp": timestamp,
	}).Debug("aggregation started")
	a.Aggregate.TimestampStart = timestamp
}

// stop records the end of an aggregation interval.
func (a *UnicornAggregator) stop() {
	timestamp := time.Now()
	a.Logger.WithFields(log.Fields{
		"timestamp": timestamp,
	}).Debug("aggregation stopped")
	a.Aggregate.TimestampEnd = timestamp
}

// submit marshals the current aggregate, hands it to the submitter and
// resets both maps for the next interval. Both mutexes are held for the
// whole marshal+reset so no update is lost between them.
// NOTE(review): the dummyMode parameter is not used in this body — the
// submission happens regardless of its value; confirm intended behavior.
func (a *UnicornAggregator) submit(submitter util.StatsSubmitter, dummyMode bool) {
	a.UnicornTuplesMutex.Lock()
	a.UnicornProxyMapMutex.Lock()
	jsonString, myerror := json.Marshal(a.Aggregate)
	if myerror == nil {
		a.Logger.WithFields(log.Fields{
			"flowtuples":   len(a.Aggregate.FlowTuples),
			"http-destips": len(a.Aggregate.ProxyMap)},
		).Info("preparing to submit")
		submitter.Submit(jsonString, "unicorn", "application/json")
	} else {
		a.Logger.Warn("error marshaling JSON for metadata aggregation")
	}
	// Start the next interval with empty maps.
	a.Aggregate.FlowTuples = make(map[string](map[string]int64))
	a.Aggregate.ProxyMap = make(map[string](map[string]int64))
	a.UnicornTuplesMutex.Unlock()
	a.UnicornProxyMapMutex.Unlock()
}

// CountFlowTuple increments the flow tuple counter for the given key.
func (a *UnicornAggregator) CountFlowTuple(key string, bytestoclient int64, bytestoserver int64) {
	a.UnicornTuplesMutex.Lock()
	if _, ok := a.Aggregate.FlowTuples[key]; !ok {
		a.Aggregate.FlowTuples[key] = make(map[string]int64)
	}
	a.Aggregate.FlowTuples[key]["count"]++
	a.Aggregate.FlowTuples[key]["total_bytes_toclient"] += bytestoclient
	a.Aggregate.FlowTuples[key]["total_bytes_toserver"] += bytestoserver
	a.UnicornTuplesMutex.Unlock()
}

// CountHTTPHost increments the count for the given IP-hostname pair.
func (a *UnicornAggregator) CountHTTPHost(destip string, hostname string) {
	a.UnicornProxyMapMutex.Lock()
	if _, ok := a.Aggregate.ProxyMap[destip]; !ok {
		a.Aggregate.ProxyMap[destip] = make(map[string]int64)
	}
	a.Aggregate.ProxyMap[destip][hostname]++
	a.UnicornProxyMapMutex.Unlock()
}

// Run starts the background aggregation service for this handler
func (a *UnicornAggregator) Run() {
	go func() {
		// Poll every 100ms; submit and restart the interval once
		// SubmitPeriod has elapsed.
		i := 0 * time.Second
		a.start()
		for {
			select {
			case <-a.CloseChan:
				close(a.ClosedChan)
				return
			default:
				if i >= a.SubmitPeriod {
					a.stop()
					a.submit(a.Submitter, a.DummyMode)
					a.start()
					i = 0 * time.Second
				}
				time.Sleep(100 * time.Millisecond)
				i += 100 * time.Millisecond
			}
		}
	}()
}

// Stop causes the aggregator to cease aggregating and submitting data
func (a *UnicornAggregator) Stop(stopChan chan bool) {
	close(a.CloseChan)
	<-a.ClosedChan
	close(stopChan)
}

// Consume processes an Entry, adding the data within to the internal
// aggregated state
func (a *UnicornAggregator) Consume(e *types.Entry) error {
	// Unicorn flow aggregation update
	if e.EventType == "flow" && e.Proto == "TCP" && e.BytesToClient > 0 {
		// Key format: "<srcip>_<destip>_<destport>", built via the reused
		// scratch buffer.
		a.StringBuf.Write([]byte(e.SrcIP))
		a.StringBuf.Write([]byte("_"))
		a.StringBuf.Write([]byte(e.DestIP))
		a.StringBuf.Write([]byte("_"))
		a.StringBuf.Write([]byte(strconv.Itoa(int(e.DestPort))))
		a.CountFlowTuple(a.StringBuf.String(), e.BytesToClient, e.BytesToServer)
		a.StringBuf.Reset()
	}
	// Proxy detection update: only count HTTP hosts on typical proxy/web
	// ports (8000-8999, 3128, 80).
	if e.EventType == "http" {
		if (e.DestPort >= 8000 && e.DestPort <= 8999) ||
			e.DestPort == 3128 || e.DestPort == 80 {
			a.CountHTTPHost(e.DestIP, e.HTTPHost)
		}
	}
	return nil
}

// GetName returns the name of the handler
func (a *UnicornAggregator) GetName() string {
	return "Unicorn aggregator/submitter"
}

// GetEventTypes returns a slice of event type strings that this handler
// should be applied to
func (a *UnicornAggregator) GetEventTypes() []string {
	return []string{"http", "flow"}
}
fever-1.0.8/processing/unicorn_aggregator_test.go000066400000000000000000000137001353566242200222610ustar00rootroot00000000000000package processing

// DCSO FEVER
// Copyright (c) 2017, 2019, DCSO GmbH

import (
	"encoding/json"
	"fmt"
	"math/rand"
	"sync"
	"testing"
	"time"

	"github.com/DCSO/fever/types"

	log "github.com/sirupsen/logrus"
)

// makeUnicornFlowEvent builds a random TCP flow Entry for testing.
func makeUnicornFlowEvent() types.Entry {
	e := types.Entry{
		SrcIP:         fmt.Sprintf("10.%d.%d.%d", rand.Intn(250), rand.Intn(250), rand.Intn(250)),
		SrcPort:       []int64{1, 2, 3, 4, 5}[rand.Intn(5)],
		DestIP:        fmt.Sprintf("10.0.0.%d", rand.Intn(250)),
		DestPort:      []int64{11, 12, 13, 14, 15}[rand.Intn(5)],
		Timestamp:     time.Now().Format(types.SuricataTimestampFormat),
		EventType:     "flow",
		Proto:         "TCP",
		BytesToClient: int64(rand.Intn(10000)),
		BytesToServer: int64(rand.Intn(10000)),
		PktsToClient:  int64(rand.Intn(100)),
		PktsToServer:  int64(rand.Intn(100)),
	}
	jsonBytes, _ := json.Marshal(e)
	e.JSONLine = string(jsonBytes)
	return e
}

// testSubmitter is a StatsSubmitter stand-in that records submissions.
type testSubmitter struct {
	DataLock sync.Mutex
	Data     []string
}

func (s *testSubmitter) Submit(in []byte, key string, contentType string) {
	s.DataLock.Lock()
	defer s.DataLock.Unlock()
	s.Data = append(s.Data, string(in))
}

func (s *testSubmitter) SubmitWithHeaders(in []byte, key string, contentType string, hdr map[string]string) {
	s.Submit(in, key, contentType)
}

func (s *testSubmitter) GetNumberSubmissions() int {
	s.DataLock.Lock()
	defer s.DataLock.Unlock()
	return len(s.Data)
}

// GetTotalAggs counts distinct flow tuple keys over all submissions.
func (s *testSubmitter) GetTotalAggs() int {
	s.DataLock.Lock()
	defer s.DataLock.Unlock()
	totalTuples := make(map[string](int))
	for _, data := range
s.Data { var agg UnicornAggregate err := json.Unmarshal([]byte(data), &agg) if err != nil { log.Fatalf("error parsing JSON: %s", err.Error()) } for k := range agg.FlowTuples { totalTuples[k]++ } } return len(totalTuples) } func (s *testSubmitter) GetFlowTuples() map[string](map[string]int64) { s.DataLock.Lock() defer s.DataLock.Unlock() allTuples := make(map[string](map[string]int64)) for _, data := range s.Data { var agg UnicornAggregate err := json.Unmarshal([]byte(data), &agg) if err != nil { log.Fatalf("error parsing JSON: %s", err.Error()) } for k := range agg.FlowTuples { if _, ok := allTuples[k]; !ok { allTuples[k] = make(map[string]int64) } allTuples[k]["count"] += agg.FlowTuples[k]["count"] } } return allTuples } func (s *testSubmitter) UseCompression() {} func (s *testSubmitter) Finish() {} func TestUnicornAggregatorNoSubmission(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) dsub := &testSubmitter{ Data: make([]string, 0), } f := MakeUnicornAggregator(dsub, 100*time.Millisecond, false) f.Run() time.Sleep(1 * time.Second) consumeWaitChan := make(chan bool) f.Stop(consumeWaitChan) <-consumeWaitChan if dsub.GetNumberSubmissions() == 0 { t.Fatalf("collected aggregations are empty") } var totallen int for _, v := range dsub.Data { totallen += len(v) } if totallen == 0 { t.Fatalf("length of collected aggregations is zero") } } func TestUnicornAggregator(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) dsub := &testSubmitter{ Data: make([]string, 0), } f := MakeUnicornAggregator(dsub, 500*time.Millisecond, false) f.Run() createdFlows := make(map[string]int) for i := 0; i < 200000; i++ { ev := makeUnicornFlowEvent() if ev.BytesToClient > 0 { key := fmt.Sprintf("%s_%s_%d", ev.SrcIP, ev.DestIP, ev.DestPort) createdFlows[key]++ } f.Consume(&ev) } for { if dsub.GetTotalAggs() < len(createdFlows) { log.Debug(dsub.GetTotalAggs()) time.Sleep(100 * time.Millisecond) } else { break } } consumeWaitChan := make(chan bool) f.Stop(consumeWaitChan) 
<-consumeWaitChan if len(dsub.Data) == 0 { t.Fatalf("collected aggregations are empty") } log.Info(dsub.GetTotalAggs(), len(createdFlows), len(dsub.Data)) var totallen int for _, v := range dsub.Data { totallen += len(v) } if totallen == 0 { t.Fatalf("length of collected aggregations is zero") } if dsub.GetTotalAggs() != len(createdFlows) { t.Fatalf("unexpected number of flow aggregates: %d/%d", dsub.GetTotalAggs(), len(createdFlows)) } for k, v := range dsub.GetFlowTuples() { if _, ok := createdFlows[k]; !ok { t.Fatalf("missing flow aggregate: %s", k) } if v["count"] != int64(createdFlows[k]) { t.Fatalf("unexpected number of flows for %s: %d/%d", k, v["count"], createdFlows[k]) } } } func TestUnicornAggregatorWithDispatch(t *testing.T) { rand.Seed(time.Now().UTC().UnixNano()) dsub := &testSubmitter{ Data: make([]string, 0), } f := MakeUnicornAggregator(dsub, 500*time.Millisecond, false) feedWaitChan := make(chan bool) outChan := make(chan types.Entry) go func() { for range outChan { // pass } close(feedWaitChan) }() d := MakeHandlerDispatcher(outChan) d.RegisterHandler(f) f.Run() createdFlows := make(map[string]int) for i := 0; i < 200000; i++ { ev := makeUnicornFlowEvent() if ev.BytesToClient > 0 { key := fmt.Sprintf("%s_%s_%d", ev.SrcIP, ev.DestIP, ev.DestPort) createdFlows[key]++ } d.Dispatch(&ev) } for { if dsub.GetTotalAggs() < len(createdFlows) { log.Debug(dsub.GetTotalAggs()) time.Sleep(100 * time.Millisecond) } else { break } } consumeWaitChan := make(chan bool) f.Stop(consumeWaitChan) close(outChan) <-feedWaitChan <-consumeWaitChan if len(dsub.Data) == 0 { t.Fatalf("collected aggregations are empty") } log.Info(dsub.GetTotalAggs(), len(createdFlows), len(dsub.Data)) var totallen int for _, v := range dsub.Data { totallen += len(v) } if totallen == 0 { t.Fatalf("length of collected aggregations is zero") } if dsub.GetTotalAggs() != len(createdFlows) { t.Fatalf("unexpected number of flow aggregates: %d/%d", dsub.GetTotalAggs(), len(createdFlows)) } for k, v 
:= range dsub.GetFlowTuples() { if _, ok := createdFlows[k]; !ok { t.Fatalf("missing flow aggregate: %s", k) } if v["count"] != int64(createdFlows[k]) { t.Fatalf("unexpected number of flows for %s: %d/%d", k, v["count"], createdFlows[k]) } } } fever-1.0.8/processing/void_handler.go000066400000000000000000000016611353566242200200040ustar00rootroot00000000000000package processing // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "github.com/DCSO/fever/types" "github.com/DCSO/fever/util" log "github.com/sirupsen/logrus" ) // VoidHandler is a handler that does nothing. type VoidHandler struct { Logger *log.Entry } // MakeVoidHandler creates a new forwarding handler func MakeVoidHandler() *VoidHandler { fh := &VoidHandler{ Logger: log.WithFields(log.Fields{ "domain": "forward", }), } return fh } // Consume processes an Entry and discards it func (fh *VoidHandler) Consume(e *types.Entry) error { _ = e return nil } // GetName returns the name of the handler func (fh *VoidHandler) GetName() string { return "Void forwarding handler" } // GetEventTypes returns a slice of event type strings that this handler // should be applied to func (fh *VoidHandler) GetEventTypes() []string { if util.ForwardAllEvents { return []string{"*"} } return util.GetAllowedTypes() } fever-1.0.8/scripts/000077500000000000000000000000001353566242200143265ustar00rootroot00000000000000fever-1.0.8/scripts/makelpush000077500000000000000000000003021353566242200162400ustar00rootroot00000000000000#!/usr/bin/env lua for line in io.lines() do io.stdout:write("LPUSH suricata \"") escapedstr = string.gsub(line, "\"", "\\\"") io.stdout:write(escapedstr) io.stdout:write("\"\r\n") end fever-1.0.8/types/000077500000000000000000000000001353566242200140035ustar00rootroot00000000000000fever-1.0.8/types/entry.go000066400000000000000000000016271353566242200155010ustar00rootroot00000000000000package types // DCSO FEVER // Copyright (c) 2017, 2018, DCSO GmbH // DNSAnswer is a single DNS answer as observed by 
Suricata type DNSAnswer struct { DNSRRName string DNSRRType string DNSRCode string DNSRData string DNSType string } // Entry is a collection of data that needs to be parsed FAST from the entry type Entry struct { SrcIP string SrcHosts []string SrcPort int64 DestIP string DestHosts []string DestPort int64 Timestamp string EventType string Proto string HTTPHost string HTTPUrl string HTTPMethod string JSONLine string DNSVersion int64 DNSRRName string DNSRRType string DNSRCode string DNSRData string DNSType string DNSAnswers []DNSAnswer TLSSni string BytesToClient int64 BytesToServer int64 PktsToClient int64 PktsToServer int64 FlowID string } fever-1.0.8/types/eve.go000066400000000000000000000241551353566242200151200ustar00rootroot00000000000000package types // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "encoding/json" "strconv" "time" ) const ( // SuricataTimestampFormat is a Go time formatting string describing the // timestamp format used by Suricata's EVE JSON output. SuricataTimestampFormat = "2006-01-02T15:04:05.999999-0700" // EventTypeFlow is the EventType string for the flow type. EventTypeFlow = "flow" // EventTypeAlert is the EventType string for the alert type. EventTypeAlert = "alert" ) type suriTime struct{ time.Time } func (t *suriTime) UnmarshalJSON(b []byte) error { data, err := strconv.Unquote(string(b)) if err != nil { return err } t.Time, err = time.Parse(SuricataTimestampFormat, data) return err } func (t *suriTime) MarshalJSON() ([]byte, error) { return []byte("\"" + t.Time.Format(SuricataTimestampFormat) + "\""), nil } // AlertEvent is am alert sub-object of an EVE entry. type AlertEvent struct { Action string `json:"action"` Gid int `json:"gid"` SignatureID int `json:"signature_id"` Rev int `json:"rev"` Signature string `json:"signature"` Category string `json:"category"` Severity int `json:"severity"` } // DNSEvent is a DNS sub-object of an EVE entry. 
type DNSEvent struct { Type string `json:"type"` ID int `json:"id"` Rcode string `json:"rcode"` Rrname string `json:"rrname"` Rrtype string `json:"rrtype"` TTL int `json:"ttl"` Rdata string `json:"rdata"` TxID int `json:"tx_id"` } // HTTPEvent is an HTTP sub-object of an EVE entry. type HTTPEvent struct { Hostname string `json:"hostname"` URL string `json:"url"` HTTPUserAgent string `json:"http_user_agent"` HTTPContentType string `json:"http_content_type"` HTTPMethod string `json:"http_method"` Protocol string `json:"protocol"` Status int `json:"status"` Length int `json:"length"` } type fileinfoEvent struct { Filename string `json:"filename"` Magic string `json:"magic"` State string `json:"state"` Md5 string `json:"md5"` Stored bool `json:"stored"` Size int `json:"size"` TxID int `json:"tx_id"` } type flowEvent struct { PktsToserver int `json:"pkts_toserver"` PktsToclient int `json:"pkts_toclient"` BytesToserver int `json:"bytes_toserver"` BytesToclient int `json:"bytes_toclient"` Start *suriTime `json:"start"` End *suriTime `json:"end"` Age int `json:"age"` State string `json:"state"` Reason string `json:"reason"` } // TLSEvent is a TLS sub-object of an EVE entry. 
type TLSEvent struct { Subject string `json:"subject"` Issuerdn string `json:"issuerdn"` Fingerprint string `json:"fingerprint"` Sni string `json:"sni"` Version string `json:"version"` } type statsEvent struct { Uptime int `json:"uptime"` Capture struct { KernelPackets int `json:"kernel_packets"` KernelDrops int `json:"kernel_drops"` } `json:"capture"` Decoder struct { Pkts int `json:"pkts"` Bytes int64 `json:"bytes"` Invalid int `json:"invalid"` Ipv4 int `json:"ipv4"` Ipv6 int `json:"ipv6"` Ethernet int `json:"ethernet"` Raw int `json:"raw"` Null int `json:"null"` Sll int `json:"sll"` TCP int `json:"tcp"` UDP int `json:"udp"` Sctp int `json:"sctp"` Icmpv4 int `json:"icmpv4"` Icmpv6 int `json:"icmpv6"` Ppp int `json:"ppp"` Pppoe int `json:"pppoe"` Gre int `json:"gre"` Vlan int `json:"vlan"` VlanQinq int `json:"vlan_qinq"` Teredo int `json:"teredo"` Ipv4InIpv6 int `json:"ipv4_in_ipv6"` Ipv6InIpv6 int `json:"ipv6_in_ipv6"` Mpls int `json:"mpls"` AvgPktSize int `json:"avg_pkt_size"` MaxPktSize int `json:"max_pkt_size"` Erspan int `json:"erspan"` Ipraw struct { InvalidIPVersion int `json:"invalid_ip_version"` } `json:"ipraw"` Ltnull struct { PktTooSmall int `json:"pkt_too_small"` UnsupportedType int `json:"unsupported_type"` } `json:"ltnull"` Dce struct { PktTooSmall int `json:"pkt_too_small"` } `json:"dce"` } `json:"decoder"` Flow struct { Memcap int `json:"memcap"` Spare int `json:"spare"` EmergModeEntered int `json:"emerg_mode_entered"` EmergModeOver int `json:"emerg_mode_over"` TCPReuse int `json:"tcp_reuse"` Memuse int `json:"memuse"` } `json:"flow"` Defrag struct { Ipv4 struct { Fragments int `json:"fragments"` Reassembled int `json:"reassembled"` Timeouts int `json:"timeouts"` } `json:"ipv4"` Ipv6 struct { Fragments int `json:"fragments"` Reassembled int `json:"reassembled"` Timeouts int `json:"timeouts"` } `json:"ipv6"` MaxFragHits int `json:"max_frag_hits"` } `json:"defrag"` Stream struct { ThreeWhsAckInWrongDir int `json:"3whs_ack_in_wrong_dir"` 
ThreeWhsAsyncWrongSeq int `json:"3whs_async_wrong_seq"` ThreeWhsRightSeqWrongAckEvasion int `json:"3whs_right_seq_wrong_ack_evasion"` } `json:"stream"` TCP struct { Sessions int `json:"sessions"` SsnMemcapDrop int `json:"ssn_memcap_drop"` Pseudo int `json:"pseudo"` PseudoFailed int `json:"pseudo_failed"` InvalidChecksum int `json:"invalid_checksum"` NoFlow int `json:"no_flow"` Syn int `json:"syn"` Synack int `json:"synack"` Rst int `json:"rst"` SegmentMemcapDrop int `json:"segment_memcap_drop"` StreamDepthReached int `json:"stream_depth_reached"` ReassemblyGap int `json:"reassembly_gap"` Memuse int `json:"memuse"` ReassemblyMemuse int `json:"reassembly_memuse"` } `json:"tcp"` Detect struct { Alert int `json:"alert"` } `json:"detect"` FlowMgr struct { ClosedPruned int `json:"closed_pruned"` NewPruned int `json:"new_pruned"` EstPruned int `json:"est_pruned"` } `json:"flow_mgr"` DNS struct { Memuse int `json:"memuse"` MemcapState int `json:"memcap_state"` MemcapGlobal int `json:"memcap_global"` } `json:"dns"` HTTP struct { Memuse int `json:"memuse"` Memcap int `json:"memcap"` } `json:"http"` } type sshEvent struct { Client struct { ProtoVersion string `json:"proto_version"` SoftwareVersion string `json:"software_version"` } `json:"client"` Server struct { ProtoVersion string `json:"proto_version"` SoftwareVersion string `json:"software_version"` } `json:"server"` } type smtpEvent struct { Helo string `json:"helo"` MailFrom string `json:"mail_from"` RcptTo []string `json:"rcpt_to"` } type tcpEvent struct { State string `json:"state"` Syn bool `json:"syn"` TCPflags string `json:"tcp_flags"` TCPflagsTc string `json:"tcp_flags_tc"` TCPflagsTs string `json:"tcp_flags_ts"` } type emailEvent struct { Status string `json:"status"` } type packetInfo struct { Linktype int `json:"linktype"` } // ExtraInfo contains non-EVE-standard extra information type ExtraInfo struct { BloomIOC string `json:"bloom-ioc,omitempty"` } // EveEvent is the huge struct which can contain a parsed 
suricata eve.json // log event. type EveEvent struct { Timestamp *suriTime `json:"timestamp"` EventType string `json:"event_type"` FlowID int64 `json:"flow_id,omitempty"` InIface string `json:"in_iface,omitempty"` SrcIP string `json:"src_ip,omitempty"` SrcPort int `json:"src_port,omitempty"` SrcHost []string `json:"src_host,omitempty"` DestIP string `json:"dest_ip,omitempty"` DestPort int `json:"dest_port,omitempty"` DestHost []string `json:"dest_host,omitempty"` Proto string `json:"proto,omitempty"` AppProto string `json:"app_proto,omitempty"` TxID int `json:"tx_id,omitempty"` TCP *tcpEvent `json:"tcp,omitempty"` PacketInfo *packetInfo `json:"packet_info,omitempty"` Alert *AlertEvent `json:"alert,omitempty"` Payload string `json:"payload,omitempty"` PayloadPrintable string `json:"payload_printable,omitempty"` Stream int `json:"stream,omitempty"` Packet string `json:"packet,omitempty"` SMTP *smtpEvent `json:"smtp,omitempty"` Email *emailEvent `json:"email,omitempty"` DNS *DNSEvent `json:"dns,omitempty"` HTTP *HTTPEvent `json:"http,omitempty"` Fileinfo *fileinfoEvent `json:"fileinfo,omitempty"` Flow *flowEvent `json:"flow,omitempty"` SSH *sshEvent `json:"ssh,omitempty"` TLS *TLSEvent `json:"tls,omitempty"` Stats *statsEvent `json:"stats,omitempty"` ExtraInfo *ExtraInfo `json:"_extra,omitempty"` } // EveOutEvent is the version of EveEvent that we use to marshal the output for // downstream consumption. type EveOutEvent EveEvent // MarshalJSON for EveOutEvents ensures that FlowIDs are represented in JSON // as a string. This is necessary to work around some arbitrary limitations such // as syslog-ng's funny JSON parser implementation, which truncates large // integers found in JSON values. 
func (e EveOutEvent) MarshalJSON() ([]byte, error) { type Alias EveOutEvent v, err := json.Marshal(&struct { FlowID string `json:"flow_id"` Alias }{ FlowID: strconv.FormatInt(e.FlowID, 10), Alias: (Alias)(e), }) return v, err } // UnmarshalJSON implements filling an EveOutEvent from a byte slice, converting // the string in the FlowID field back into a number. This is necessary to // ensure that a round-trip (write+read) works. func (e *EveOutEvent) UnmarshalJSON(d []byte) error { type EveOutEvent2 EveOutEvent x := struct { EveOutEvent2 FlowID json.Number `json:"flow_id"` }{EveOutEvent2: EveOutEvent2(*e)} if err := json.Unmarshal(d, &x); err != nil { return err } *e = EveOutEvent(x.EveOutEvent2) var err error e.FlowID, _ = x.FlowID.Int64() // ignore error; defaulting to zero return err } fever-1.0.8/types/eve_test.go000066400000000000000000000030651353566242200161540ustar00rootroot00000000000000package types // DCSO FEVER // Copyright (c) 2019, DCSO GmbH import ( "encoding/json" "strings" "testing" "time" ) func TestEVERoundtripTimestamp(t *testing.T) { timeCmp, _ := time.Parse(time.RFC3339, "2019-08-06 13:30:01.690233 +0200 CEST") ee := EveEvent{ Timestamp: &suriTime{ Time: timeCmp, }, EventType: "http", SrcIP: "1.2.3.4", SrcPort: 2222, DestIP: "3.4.5.6", DestPort: 80, Proto: "tcp", FlowID: 642, HTTP: &HTTPEvent{ Hostname: "test", URL: "/", }, } out, err := json.Marshal(ee) if err != nil { t.Error(err) } var inEVE EveEvent err = json.Unmarshal(out, &inEVE) if err != nil { t.Error(err) } if !inEVE.Timestamp.Time.Equal(ee.Timestamp.Time) { t.Fatalf("timestamp round-trip failed: %v <-> %v", inEVE.Timestamp, ee.Timestamp) } } func TestEVEStringFlowIDRoundtrip(t *testing.T) { timeCmp, _ := time.Parse(time.RFC3339, "2019-08-06 13:30:01.690233 +0200 CEST") ee := EveOutEvent{ Timestamp: &suriTime{ Time: timeCmp, }, EventType: "http", SrcIP: "1.2.3.4", SrcPort: 2222, DestIP: "3.4.5.6", DestPort: 80, Proto: "tcp", FlowID: 649, HTTP: &HTTPEvent{ Hostname: "test", URL: "/", 
}, } out, err := json.Marshal(ee) if err != nil { t.Error(err) } var inEVE EveOutEvent err = json.Unmarshal(out, &inEVE) if err != nil { t.Error(err) } if !strings.Contains(string(out), `"flow_id":"649"`) { t.Fatalf("flow ID missing") } if inEVE.FlowID != ee.FlowID { t.Fatalf("round-trip failed: %v <-> %v", inEVE.FlowID, ee.FlowID) } } fever-1.0.8/types/flow_event.go000066400000000000000000000126521353566242200165100ustar00rootroot00000000000000package types // DCSO FEVER // Copyright (c) 2017, 2018, DCSO GmbH import ( "encoding/binary" "errors" "fmt" "io" "net" "time" ) // FlowEvent stores the meta-data of a flow event in a compact, binary form. type FlowEvent struct { Timestamp uint64 Format byte SrcIP []byte DestIP []byte SrcPort uint16 DestPort uint16 BytesToServer uint32 BytesToClient uint32 PktsToServer uint32 PktsToClient uint32 Flags uint16 } // FlowEventFlags defines various flags for use in FlowEvent.Flags (e.g. the protocol). var FlowEventFlags = map[string]uint16{ "TCP": 1 << 0, "UDP": 1 << 1, } var maxBytes = int64(^uint32(0)) func parseIP(stringIP string) ([]byte, error) { ip := net.ParseIP(stringIP) if ip == nil { return nil, errors.New("invalid IP") } ipv4 := ip.To4() if ipv4 == nil { //this is an IPv6 address reverseIP(ip) return ip, nil } //this is an IPv4 address reverseIP(ipv4) return ipv4, nil } func reverseIP(b []byte) { for i := 0; i < len(b); i++ { b[i], b[len(b)-i-1] = b[len(b)-i-1], b[i] } } // FromEntry populates a FlowEvent using an Entry func (fe *FlowEvent) FromEntry(e *Entry) error { ts, err := time.Parse(SuricataTimestampFormat, e.Timestamp) if err != nil { return err } srcIP, err := parseIP(e.SrcIP) if err != nil { return err } destIP, err := parseIP(e.DestIP) if err != nil { return err } flags := uint16(0) if e.Proto == "TCP" { flags |= FlowEventFlags["TCP"] } if e.Proto == "UDP" { flags |= FlowEventFlags["UDP"] } fe.Timestamp = uint64(ts.UnixNano()) fe.SrcIP = srcIP fe.SrcPort = uint16(e.SrcPort) fe.DestIP = destIP fe.DestPort = 
uint16(e.DestPort) fe.Format = 1 if len(srcIP) == 16 { fe.Format |= 1 << 1 } fe.Format |= 1 << 2 //bits 3,4,5 and 6 mark the version (currently 1) if len(srcIP) != len(destIP) { return fmt.Errorf("source and destination IPS have different lengths O.o") } if e.BytesToServer > maxBytes { return errors.New("BytesToServer is too large") } if e.BytesToClient > maxBytes { return errors.New("BytesToClient is too large") } if e.PktsToServer > maxBytes { return errors.New("PktsToServer is too large") } if e.PktsToClient > maxBytes { return errors.New("PktsToClient is too large") } fe.BytesToServer = uint32(e.BytesToServer) fe.BytesToClient = uint32(e.BytesToClient) fe.PktsToServer = uint32(e.PktsToServer) fe.PktsToClient = uint32(e.PktsToClient) fe.Flags = flags return nil } // Unmarshal reads a FlowEvent from an io.Reader. func (fe *FlowEvent) Unmarshal(reader io.Reader) error { bs1 := make([]byte, 1) bs2 := make([]byte, 2) bs4 := make([]byte, 4) bs8 := make([]byte, 8) //format if _, err := io.ReadFull(reader, bs1); err != nil { return err } fe.Format = bs1[0] if fe.Format&0x01 != 0x01 { return fmt.Errorf("invalid format byte (should start with a 1)") } isIPv6 := (fe.Format & 0x02) == 0x02 //timestamp if _, err := io.ReadFull(reader, bs8); err != nil { return err } fe.Timestamp = binary.LittleEndian.Uint64(bs8) //src ip if isIPv6 { fe.SrcIP = make([]byte, 4*4) if _, err := io.ReadFull(reader, fe.SrcIP); err != nil { return err } } else { fe.SrcIP = make([]byte, 4) if _, err := io.ReadFull(reader, fe.SrcIP); err != nil { return err } } //src port if _, err := io.ReadFull(reader, bs2); err != nil { return err } fe.SrcPort = binary.LittleEndian.Uint16(bs2) //dest ip if isIPv6 { fe.DestIP = make([]byte, 4*4) if _, err := io.ReadFull(reader, fe.DestIP); err != nil { return err } } else { fe.DestIP = make([]byte, 4) if _, err := io.ReadFull(reader, fe.DestIP); err != nil { return err } } //dest port if _, err := io.ReadFull(reader, bs2); err != nil { return err } fe.DestPort = 
binary.LittleEndian.Uint16(bs2) //PktsToServer if _, err := io.ReadFull(reader, bs4); err != nil { return err } fe.PktsToServer = binary.LittleEndian.Uint32(bs4) //PktsToClient if _, err := io.ReadFull(reader, bs4); err != nil { return err } fe.PktsToClient = binary.LittleEndian.Uint32(bs4) //BytesToServer if _, err := io.ReadFull(reader, bs4); err != nil { return err } fe.BytesToServer = binary.LittleEndian.Uint32(bs4) //BytesToClient if _, err := io.ReadFull(reader, bs4); err != nil { return err } fe.BytesToClient = binary.LittleEndian.Uint32(bs4) //Flags if _, err := io.ReadFull(reader, bs2); err != nil { return err } fe.Flags = binary.LittleEndian.Uint16(bs2) return nil } // Marshal writes a FlowEvent to an io.Writer. func (fe *FlowEvent) Marshal(writer io.Writer) error { bs1 := make([]byte, 1) bs2 := make([]byte, 2) bs4 := make([]byte, 4) bs8 := make([]byte, 8) //format bs1[0] = fe.Format writer.Write(bs1) //timestamp binary.LittleEndian.PutUint64(bs8, fe.Timestamp) writer.Write(bs8) //src ip writer.Write(fe.SrcIP) //src port binary.LittleEndian.PutUint16(bs2, fe.SrcPort) writer.Write(bs2) //dest ip writer.Write(fe.DestIP) //dest port binary.LittleEndian.PutUint16(bs2, fe.DestPort) writer.Write(bs2) //PktsToServer binary.LittleEndian.PutUint32(bs4, fe.PktsToServer) writer.Write(bs4) //PktsToClient binary.LittleEndian.PutUint32(bs4, fe.PktsToClient) writer.Write(bs4) //BytesToServer binary.LittleEndian.PutUint32(bs4, fe.BytesToServer) writer.Write(bs4) //BytesToClient binary.LittleEndian.PutUint32(bs4, fe.BytesToClient) writer.Write(bs4) //Flags binary.LittleEndian.PutUint16(bs2, fe.Flags) writer.Write(bs2) return nil } fever-1.0.8/types/flow_event_test.go000066400000000000000000000007471353566242200175510ustar00rootroot00000000000000package types // DCSO FEVER // Copyright (c) 2017, 2018, DCSO GmbH import ( "bytes" "net" "testing" ) func TestIPParsing(t *testing.T) { ipv4 := "8.8.8.8" ipv6 := "2001:0db8:85a3:0000:0000:8a2e:0370:7334" parsedIPv4, err := 
parseIP(ipv4) if err != nil || !bytes.Equal(parsedIPv4, net.ParseIP(ipv4).To4()) { t.Fatal("Conversion failed!") } parsedIPv6, err := parseIP(ipv6) if err != nil || !bytes.Equal(parsedIPv6, net.ParseIP(ipv6)) { t.Fatal("Conversion failed!") } } fever-1.0.8/util/000077500000000000000000000000001353566242200136145ustar00rootroot00000000000000fever-1.0.8/util/consumer.go000066400000000000000000000112241353566242200157760ustar00rootroot00000000000000package util // Parts of this code have been taken from // https://github.com/streadway/amqp/blob/master/_examples/simple-consumer/consumer.go // released under the license of the main streadway/amqp project: // // Copyright (c) 2012, Sean Treadway, SoundCloud Ltd. // All rights reserved. // // Redistribution and use in source and binary forms, with or without // modification, are permitted provided that the following conditions are met: // // Redistributions of source code must retain the above copyright notice, this // list of conditions and the following disclaimer. // // Redistributions in binary form must reproduce the above copyright notice, this // list of conditions and the following disclaimer in the documentation and/or // other materials provided with the distribution. // // THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND // ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED // WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE // DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE // FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL // DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR // SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER // CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, // OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE // OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. import ( "fmt" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqptest" log "github.com/sirupsen/logrus" ) // Consumer reads and processes messages from a fake RabbitMQ server. type Consumer struct { conn wabbit.Conn channel wabbit.Channel tag string done chan error Callback func(wabbit.Delivery) } // NewConsumer creates a new consumer with the given properties. The callback // function is called for each delivery accepted from a consumer channel. func NewConsumer(amqpURI, exchange, exchangeType, queueName, key, ctag string, callback func(wabbit.Delivery)) (*Consumer, error) { var err error c := &Consumer{ conn: nil, channel: nil, tag: ctag, done: make(chan error), Callback: callback, } log.Debugf("dialing %q", amqpURI) c.conn, err = amqptest.Dial(amqpURI) if err != nil { return nil, fmt.Errorf("dial: %s", err) } log.Debugf("got Connection, getting Channel") c.channel, err = c.conn.Channel() if err != nil { return nil, fmt.Errorf("channel: %s", err) } log.Debugf("got Channel, declaring Exchange (%q)", exchange) if err = c.channel.ExchangeDeclare( exchange, // name of the exchange exchangeType, // type wabbit.Option{ "durable": true, "delete": false, "internal": false, "noWait": false, }, ); err != nil { return nil, fmt.Errorf("exchange declare: %s", err) } queue, err := c.channel.QueueDeclare( queueName, // name of the queue wabbit.Option{ "durable": true, "delete": false, "exclusive": false, "noWait": false, }, ) if err != nil 
{ return nil, fmt.Errorf("queue declare: %s", err) } log.Debugf("declared Queue (%q %d messages, %d consumers), binding to Exchange (key %q)", queue.Name(), queue.Messages(), queue.Consumers(), key) if err = c.channel.QueueBind( queue.Name(), // name of the queue key, // bindingKey exchange, // sourceExchange wabbit.Option{ "noWait": false, }, ); err != nil { return nil, fmt.Errorf("queue bind: %s", err) } log.Debugf("Queue bound to Exchange, starting Consume (consumer tag %q)", c.tag) deliveries, err := c.channel.Consume( queue.Name(), // name c.tag, // consumerTag, wabbit.Option{ "exclusive": false, "noLocal": false, "noWait": false, }, ) if err != nil { return nil, fmt.Errorf("queue consume: %s", err) } go handle(deliveries, c.done, c.Callback) return c, nil } // Shutdown shuts down a consumer, closing down its channels and connections. func (c *Consumer) Shutdown() error { // will close() the deliveries channel if err := c.channel.Close(); err != nil { return fmt.Errorf("channel close failed: %s", err) } if err := c.conn.Close(); err != nil { return fmt.Errorf("AMQP connection close error: %s", err) } defer log.Debugf("AMQP shutdown OK") // wait for handle() to exit return <-c.done } func handle(deliveries <-chan wabbit.Delivery, done chan error, callback func(wabbit.Delivery)) { for d := range deliveries { log.Debugf( "got %dB delivery: [%v] %q", len(d.Body()), d.DeliveryTag(), d.Body(), ) callback(d) d.Ack(false) } done <- nil } fever-1.0.8/util/eventfilter.go000066400000000000000000000022511353566242200164720ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( log "github.com/sirupsen/logrus" ) var filter map[string]bool // ForwardAllEvents is set to true if the user has selected to skip event // type filtering. var ForwardAllEvents bool // PrepareEventFilter registers the passed string array slice into the list of // event types to be forwarded to the secondary processor. 
func PrepareEventFilter(list []string, forwardall bool) { filter = make(map[string]bool) ForwardAllEvents = forwardall if ForwardAllEvents { log.WithFields(log.Fields{ "domain": "forward", }).Info("forwarding all event types") } for _, s := range list { log.WithFields(log.Fields{ "domain": "forward", "type": s, }).Info("event type added") filter[s] = true } } // GetAllowedTypes returns a slice of strings with all forwarded types. func GetAllowedTypes() []string { allowedTypes := make([]string, 0) for k := range filter { allowedTypes = append(allowedTypes, k) } return allowedTypes } // AllowType returns true if the event type indicated by the string t is allowed // to be forwarded. func AllowType(t string) bool { return (ForwardAllEvents || filter[t]) } fever-1.0.8/util/eventfilter_test.go000066400000000000000000000021571353566242200175360ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "testing" ) func TestEventFilterEmpty(t *testing.T) { PrepareEventFilter([]string{}, false) if AllowType("foo") { t.Fail() } if len(GetAllowedTypes()) > 0 { t.Fail() } } func TestEventFilterEmptyForwardAllSelected(t *testing.T) { PrepareEventFilter([]string{"foo", "bar"}, false) if !AllowType("foo") { t.Fatal("foo not allowed") } if !AllowType("bar") { t.Fatal("bar not allowed") } if AllowType("baz") { t.Fatal("baz allowed but shouldn't be") } if len(GetAllowedTypes()) != 2 { t.Fail() } } func TestEventFilterEmptyForwardAllSelectedDuplicate(t *testing.T) { PrepareEventFilter([]string{"foo", "foo"}, false) if !AllowType("foo") { t.Fatal("foo not allowed") } if AllowType("bar") { t.Fatal("bar allowed but shouldn't be") } if AllowType("baz") { t.Fatal("baz allowed but shouldn't be") } if len(GetAllowedTypes()) != 1 { t.Fail() } } func TestEventFilterEmptyForwardAll(t *testing.T) { PrepareEventFilter([]string{}, true) if !AllowType("foo") { t.Fail() } if len(GetAllowedTypes()) > 0 { t.Fail() } } 
fever-1.0.8/util/hostnamer.go000066400000000000000000000021611353566242200161430ustar00rootroot00000000000000package util import ( "net" "strings" "sync" "time" "github.com/patrickmn/go-cache" ) // HostNamer is a component that provides cached hostnames for IP // addresses passed as strings. type HostNamer struct { Cache *cache.Cache Lock sync.Mutex } // NewHostNamer returns a new HostNamer with the given default expiration time. // Data entries will be purged after each cleanupInterval. func NewHostNamer(defaultExpiration, cleanupInterval time.Duration) *HostNamer { return &HostNamer{ Cache: cache.New(defaultExpiration, cleanupInterval), } } // GetHostname returns a list of host names for a given IP address. func (n *HostNamer) GetHostname(ipAddr string) ([]string, error) { n.Lock.Lock() defer n.Lock.Unlock() val, found := n.Cache.Get(ipAddr) if found { return val.([]string), nil } hns, err := net.LookupAddr(ipAddr) if err != nil { return nil, err } for i, hn := range hns { hns[i] = strings.TrimRight(hn, ".") } n.Cache.Set(ipAddr, hns, cache.DefaultExpiration) val = hns return val.([]string), nil } // Flush clears the cache of a HostNamer. 
func (n *HostNamer) Flush() { n.Cache.Flush() } fever-1.0.8/util/hostnamer_test.go000066400000000000000000000020011353566242200171730ustar00rootroot00000000000000package util import ( "testing" "time" log "github.com/sirupsen/logrus" ) func _TestHostNamerQuad8(t *testing.T, ip string) { hn := NewHostNamer(5*time.Second, 5*time.Second) v, err := hn.GetHostname(ip) if err != nil { t.Fatal(err) } if len(v) == 0 { t.Fatal("no response") } else { log.Debugf("got response %v", v) } v, err = hn.GetHostname(ip) if err != nil { t.Fatal(err) } if len(v) == 0 { t.Fatal("no response") } else { log.Debugf("got response %v", v) } time.Sleep(6 * time.Second) v, err = hn.GetHostname(ip) if err != nil { t.Fatal(err) } if len(v) == 0 { t.Fatal("no response") } else { log.Debugf("got response %v", v) } } func TestHostNamerQuad8v4(t *testing.T) { _TestHostNamerQuad8(t, "8.8.8.8") } func TestHostNamerQuad8v6(t *testing.T) { _TestHostNamerQuad8(t, "2001:4860:4860::8888") } func TestHostNamerInvalid(t *testing.T) { hn := NewHostNamer(5*time.Second, 5*time.Second) _, err := hn.GetHostname("8.") if err == nil { t.Fatal("missed error") } } fever-1.0.8/util/performance_stats_encoder.go000066400000000000000000000037051353566242200213660ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bytes" "strings" "sync" "time" "github.com/DCSO/fluxline" log "github.com/sirupsen/logrus" ) // PerformanceStatsEncoder is a component to collect, encode and submit data // to an InfluxDb via RabbitMQ. type PerformanceStatsEncoder struct { sync.RWMutex Encoder *fluxline.Encoder Buffer bytes.Buffer Logger *log.Entry Tags map[string]string Submitter StatsSubmitter SubmitPeriod time.Duration LastSubmitted time.Time DummyMode bool } // MakePerformanceStatsEncoder creates a new stats encoder, submitting via // the given StatsSubmitter, with at least submitPeriod time between submissions. 
// if dummyMode is set, then the result will be printed to stdout instead of // submitting. func MakePerformanceStatsEncoder(statsSubmitter StatsSubmitter, submitPeriod time.Duration, dummyMode bool) *PerformanceStatsEncoder { a := &PerformanceStatsEncoder{ Logger: log.WithFields(log.Fields{ "domain": "statscollect", }), Submitter: statsSubmitter, DummyMode: dummyMode, Tags: make(map[string]string), LastSubmitted: time.Now(), SubmitPeriod: submitPeriod, } a.Encoder = fluxline.NewEncoder(&a.Buffer) return a } // Submit encodes the data annotated with 'influx' tags in the passed struct and // sends it to the configured submitter. func (a *PerformanceStatsEncoder) Submit(val interface{}) { a.Lock() a.Buffer.Reset() err := a.Encoder.EncodeWithoutTypes(ToolName, val, a.Tags) if err != nil { if a.Logger != nil { a.Logger.WithFields(log.Fields{}).Warn(err) } } line := strings.TrimSpace(a.Buffer.String()) if line == "" { a.Logger.WithFields(log.Fields{}).Warn("skipping empty influx line") a.Unlock() return } jsonString := []byte(line) a.Submitter.SubmitWithHeaders(jsonString, "", "text/plain", map[string]string{ "database": "telegraf", "retention_policy": "default", }) a.Unlock() } fever-1.0.8/util/performance_stats_encoder_test.go000066400000000000000000000074031353566242200224240ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, 2019, DCSO GmbH import ( "fmt" "regexp" "sync" "testing" "time" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqptest" "github.com/NeowayLabs/wabbit/amqptest/server" log "github.com/sirupsen/logrus" ) var testStruct = struct { TestVal uint64 `influx:"testval"` TestVal2 uint64 `influx:"testvalue"` TestVal3 uint64 }{ 1, 2, 3, } var testStructUntagged = struct { TestVal uint64 TestVal2 uint64 TestVal3 uint64 }{ 1, 2, 3, } func TestPerformanceStatsEncoderEmpty(t *testing.T) { serverURL := "amqp://sensor:sensor@127.0.0.1:9999/%2f/" // start mock AMQP server fakeServer := server.NewServer(serverURL) 
fakeServer.Start() defer fakeServer.Stop() // set up consumer results := make([]string, 0) c, err := NewConsumer(serverURL, "tdh.metrics", "direct", "tdh.metrics.testqueue", "", "", func(d wabbit.Delivery) { results = append(results, string(d.Body())) }) if err != nil { t.Fatal(err) } defer c.Shutdown() // set up submitter statssubmitter, err := MakeAMQPSubmitterWithReconnector(serverURL, "tdh.metrics", true, func(url string) (wabbit.Conn, error) { // we pass in a custom reconnector which uses the amqptest implementation var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } defer statssubmitter.Finish() // create InfluxDB line protocol encoder/submitter pse := MakePerformanceStatsEncoder(statssubmitter, 1*time.Second, false) pse.Submit(testStructUntagged) time.Sleep(1 * time.Second) if len(results) != 0 { t.Fatalf("unexpected result length: %d !=0", len(results)) } } func TestPerformanceStatsEncoder(t *testing.T) { serverURL := "amqp://sensor:sensor@127.0.0.1:9999/%2f/" // start mock AMQP server fakeServer := server.NewServer(serverURL) fakeServer.Start() defer fakeServer.Stop() // set up consumer results := make([]string, 0) gateChan := make(chan bool) var resultsLock sync.Mutex c, err := NewConsumer(serverURL, "tdh.metrics", "direct", "tdh.metrics.testqueue", "", "", func(d wabbit.Delivery) { resultsLock.Lock() results = append(results, string(d.Body())) resultsLock.Unlock() log.Info(string(d.Body())) gateChan <- true }) if err != nil { t.Fatal(err) } defer c.Shutdown() // set up submitter statssubmitter, err := MakeAMQPSubmitterWithReconnector(serverURL, "tdh.metrics", true, func(url string) (wabbit.Conn, error) { // we pass in a custom reconnector which uses the amqptest implementation var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } defer statssubmitter.Finish() // create InfluxDB line protocol encoder/submitter pse := 
MakePerformanceStatsEncoder(statssubmitter, 1*time.Second, false) pse.Submit(testStruct) <-gateChan pse.Submit(testStruct) <-gateChan testStruct.TestVal = 3 pse.Submit(testStruct) <-gateChan pse.Submit(testStruct) <-gateChan resultsLock.Lock() if len(results) != 4 { t.Fatalf("unexpected result length: %d != 4", len(results)) } if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=1,testvalue=2", ToolName), []byte(results[0])); !match { t.Fatalf("unexpected match content: %s", results[0]) } if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=1,testvalue=2", ToolName), []byte(results[1])); !match { t.Fatalf("unexpected match content: %s", results[1]) } if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=3,testvalue=2", ToolName), []byte(results[2])); !match { t.Fatalf("unexpected match content: %s", results[2]) } if match, _ := regexp.Match(fmt.Sprintf("^%s,[^ ]+ testval=3,testvalue=2", ToolName), []byte(results[3])); !match { t.Fatalf("unexpected match content: %s", results[3]) } resultsLock.Unlock() } fever-1.0.8/util/submitter.go000066400000000000000000000005501353566242200161610ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, DCSO GmbH // StatsSubmitter is an interface for an entity that sends JSON data to an endpoint type StatsSubmitter interface { Submit(rawData []byte, key string, contentType string) SubmitWithHeaders(rawData []byte, key string, contentType string, myHeaders map[string]string) UseCompression() Finish() } fever-1.0.8/util/submitter_amqp.go000066400000000000000000000140751353566242200172060ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, 2018, 2019, DCSO GmbH import ( "bytes" "compress/gzip" "sync" "time" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqp" log "github.com/sirupsen/logrus" origamqp "github.com/streadway/amqp" ) // AMQPBaseSubmitter is the base engine that sends reports to a RabbitMQ host and // handles reconnection. 
type AMQPBaseSubmitter struct { URL string Verbose bool SensorID string Conn wabbit.Conn Channel wabbit.Channel StopReconnection chan bool ErrorChan chan wabbit.Error Logger *log.Entry ChanMutex sync.Mutex ConnMutex sync.Mutex Reconnector func(string) (wabbit.Conn, error) NofSubmitters uint } // AMQPSubmitter is a StatsSubmitter that sends reports to a RabbitMQ exchange. type AMQPSubmitter struct { Submitter *AMQPBaseSubmitter Target string Compress bool } const ( amqpReconnDelay = 5 * time.Second ) var ( gSubmitters = make(map[string]*AMQPBaseSubmitter) ) func defaultReconnector(amqpURI string) (wabbit.Conn, error) { conn, err := amqp.Dial(amqpURI) if err != nil { return nil, err } return conn, err } func reconnectOnFailure(s *AMQPBaseSubmitter) { errChan := s.ErrorChan for { select { case <-s.StopReconnection: return case rabbitErr := <-errChan: if rabbitErr != nil { log.Warnf("RabbitMQ connection failed: %s", rabbitErr.Reason()) s.ChanMutex.Lock() for { time.Sleep(amqpReconnDelay) connErr := s.connect() if connErr != nil { log.Warnf("RabbitMQ error: %s", connErr) } else { log.Infof("Reestablished connection to %s", s.URL) errChan = make(chan wabbit.Error) s.Conn.NotifyClose(errChan) s.ErrorChan = errChan break } } s.ChanMutex.Unlock() } } } } func (s *AMQPBaseSubmitter) connect() error { var err error s.ConnMutex.Lock() s.Logger.Debugf("calling reconnector") s.Conn, err = s.Reconnector(s.URL) if err != nil { s.Conn = nil s.ConnMutex.Unlock() return err } s.Channel, err = s.Conn.Channel() if err != nil { s.Conn.Close() s.ConnMutex.Unlock() return err } log.Debugf("Submitter established connection to %s", s.URL) s.ConnMutex.Unlock() return nil } // MakeAMQPSubmitterWithReconnector creates a new submitter connected to a // RabbitMQ server at the given URL, using the reconnector function as a means // to Dial() in order to obtain a Connection object. 
func MakeAMQPSubmitterWithReconnector(url string, target string, verbose bool, reconnector func(string) (wabbit.Conn, error)) (*AMQPSubmitter, error) { var err error var mySubmitter *AMQPBaseSubmitter if _, ok := gSubmitters[url]; !ok { mySubmitter = &AMQPBaseSubmitter{ URL: url, Verbose: verbose, ErrorChan: make(chan wabbit.Error), Reconnector: reconnector, StopReconnection: make(chan bool), } mySubmitter.Logger = log.WithFields(log.Fields{ "domain": "submitter", "submitter": "AMQP", "url": url, }) mySubmitter.Logger.Debugf("new base submitter created") mySubmitter.SensorID, err = GetSensorID() if err != nil { return nil, err } err = mySubmitter.connect() if err != nil { return nil, err } mySubmitter.Conn.NotifyClose(mySubmitter.ErrorChan) go reconnectOnFailure(mySubmitter) gSubmitters[url] = mySubmitter mySubmitter.NofSubmitters++ mySubmitter.Logger.Debugf("number of submitters now %d", mySubmitter.NofSubmitters) } else { mySubmitter = gSubmitters[url] } retSubmitter := &AMQPSubmitter{ Submitter: mySubmitter, Target: target, } return retSubmitter, nil } // MakeAMQPSubmitter creates a new submitter connected to a RabbitMQ server // at the given URL. func MakeAMQPSubmitter(url string, target string, verbose bool) (*AMQPSubmitter, error) { return MakeAMQPSubmitterWithReconnector(url, target, verbose, defaultReconnector) } // UseCompression enables gzip compression of submitted payloads. func (s *AMQPSubmitter) UseCompression() { s.Compress = true } // Submit sends the rawData payload via the registered RabbitMQ connection. func (s *AMQPSubmitter) Submit(rawData []byte, key string, contentType string) { s.SubmitWithHeaders(rawData, key, contentType, nil) } // SubmitWithHeaders sends the rawData payload via the registered RabbitMQ connection, // adding some extra key-value pairs to the header. 
func (s *AMQPSubmitter) SubmitWithHeaders(rawData []byte, key string, contentType string, myHeaders map[string]string) { var payload []byte var encoding string var isCompressed string if s.Compress { var b bytes.Buffer w := gzip.NewWriter(&b) w.Write(rawData) w.Close() payload = b.Bytes() isCompressed = "true" encoding = "gzip" } else { payload = rawData isCompressed = "false" } option := wabbit.Option{ "contentType": contentType, "contentEncoding": encoding, "headers": origamqp.Table{ "sensor_id": s.Submitter.SensorID, "compressed": isCompressed, }, } for k, v := range myHeaders { option["headers"].(origamqp.Table)[k] = v } err := s.Submitter.Channel.Publish( s.Target, // exchange key, // routing key payload, option) if err != nil { s.Submitter.Logger.Warn(err) } else { s.Submitter.Logger.WithFields(log.Fields{ "rawsize": len(rawData), "payloadsize": len(payload), }).Infof("submission to %s:%s (%s) successful", s.Submitter.URL, s.Target, key) } } // Finish cleans up the AMQP connection (reference counted). func (s *AMQPSubmitter) Finish() { s.Submitter.Logger.Debugf("finishing submitter %v -> %v", s, s.Submitter) if s.Submitter.NofSubmitters == 1 { close(s.Submitter.StopReconnection) if s.Submitter.Verbose { s.Submitter.Logger.Info("closing connection") } if s.Submitter.Channel != nil { s.Submitter.Channel.Close() } s.Submitter.ConnMutex.Lock() if s.Submitter.Conn != nil { s.Submitter.Conn.Close() } s.Submitter.ConnMutex.Unlock() delete(gSubmitters, s.Submitter.URL) } else { s.Submitter.NofSubmitters-- s.Submitter.Logger.Debugf("number of submitters now %d", s.Submitter.NofSubmitters) } } fever-1.0.8/util/submitter_dummy.go000066400000000000000000000032741353566242200174020ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2018, DCSO GmbH import ( "unicode" log "github.com/sirupsen/logrus" ) // DummySubmitter is a StatsSubmitter that just logs submissions without // sending them over the network. 
type DummySubmitter struct { Logger *log.Entry SensorID string } func isASCIIPrintable(s string) bool { for _, r := range s { if r > unicode.MaxASCII || !unicode.IsPrint(r) { return false } } return true } // MakeDummySubmitter creates a new submitter just logging to the default log // target. func MakeDummySubmitter() (*DummySubmitter, error) { mySubmitter := &DummySubmitter{ Logger: log.WithFields(log.Fields{ "domain": "submitter", "submitter": "dummy", }), } sensorID, err := GetSensorID() if err != nil { return nil, err } mySubmitter.SensorID = sensorID return mySubmitter, nil } // UseCompression enables gzip compression of submitted payloads (not // applicable in this implementation). func (s *DummySubmitter) UseCompression() { // pass } // Submit logs the rawData payload. func (s *DummySubmitter) Submit(rawData []byte, key string, contentType string) { s.SubmitWithHeaders(rawData, key, contentType, nil) } // SubmitWithHeaders logs rawData payload, adding some extra key-value pairs to // the header. func (s *DummySubmitter) SubmitWithHeaders(rawData []byte, key string, contentType string, myHeaders map[string]string) { bytestring := string(rawData) if isASCIIPrintable(bytestring) { s.Logger.Info(bytestring) } else { s.Logger.Infof("%s (%s) - submitting non-printable byte array of length %d", key, contentType, len(rawData)) } } // Finish is a no-op in this implementation. 
func (s *DummySubmitter) Finish() { // pass } fever-1.0.8/util/submitter_test.go000066400000000000000000000101551353566242200172220ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bytes" "fmt" "testing" "time" log "github.com/sirupsen/logrus" "github.com/NeowayLabs/wabbit" "github.com/NeowayLabs/wabbit/amqptest" "github.com/NeowayLabs/wabbit/amqptest/server" ) func TestInvalidReconnector(t *testing.T) { log.SetLevel(log.DebugLevel) submitter, err := MakeAMQPSubmitterWithReconnector("amqp://sensor:sensor@localhost:9999/%2f", "foo.bar.test", true, func(url string) (wabbit.Conn, error) { return nil, fmt.Errorf("error") }) if submitter != nil || err == nil { t.Fail() } } func TestSubmitter(t *testing.T) { serverURL := "amqp://sensor:sensor@localhost:9999/%2f/" log.SetLevel(log.DebugLevel) // start mock server fakeServer := server.NewServer(serverURL) fakeServer.Start() // set up consumer var buf bytes.Buffer allDone := make(chan bool) c, err := NewConsumer(serverURL, "foo.bar.test", "direct", "foo", "foo", "foo-test1", func(d wabbit.Delivery) { buf.Write(d.Body()) if buf.Len() == 4 { allDone <- true } }) if err != nil { t.Fatal(err) } // set up submitter submitter, err := MakeAMQPSubmitterWithReconnector(serverURL, "foo.bar.test", true, func(url string) (wabbit.Conn, error) { // we pass in a custom reconnector which uses the amqptest implementation var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } // send some messages... submitter.Submit([]byte("1"), "foo", "text/plain") submitter.Submit([]byte("2"), "foo", "text/plain") submitter.Submit([]byte("3"), "foo", "text/plain") submitter.Submit([]byte("4"), "foo", "text/plain") // ... 
and wait until they are received and processed <-allDone // check if order and length is correct if buf.String() != "1234" { t.Fail() } // tear down test setup submitter.Finish() fakeServer.Stop() c.Shutdown() } func TestSubmitterReconnect(t *testing.T) { serverURL := "amqp://sensor:sensor@localhost:9992/%2f/" log.SetLevel(log.DebugLevel) // start mock server fakeServer := server.NewServer(serverURL) fakeServer.Start() // set up consumer var buf bytes.Buffer done := make(chan bool) c, err := NewConsumer(serverURL, "foo.bar.test", "direct", "foo", "foo", "foo-test1", func(d wabbit.Delivery) { buf.Write(d.Body()) log.Printf("received '%s', buf length %d", d.Body(), buf.Len()) if buf.Len() == 2 { done <- true } }) if err != nil { t.Fatal(err) } // set up submitter submitter, err := MakeAMQPSubmitterWithReconnector(serverURL, "foo.bar.test", true, func(url string) (wabbit.Conn, error) { // we pass in a custom reconnector which uses the amqptest implementation var conn wabbit.Conn conn, err = amqptest.Dial(url) return conn, err }) if err != nil { t.Fatal(err) } defer submitter.Finish() // send some messages... 
submitter.Submit([]byte("A"), "foo", "text/plain") submitter.Submit([]byte("B"), "foo", "text/plain") stopped := make(chan bool) restarted := make(chan bool) <-done go func() { fakeServer.Stop() close(stopped) time.Sleep(5 * time.Second) fakeServer := server.NewServer(serverURL) fakeServer.Start() close(restarted) }() <-stopped log.Info("server stopped") // these are buffered on client side because submitter will not publish // with immediate flag set submitter.Submit([]byte("C"), "foo", "text/plain") submitter.Submit([]byte("D"), "foo", "text/plain") <-restarted log.Info("server restarted") // reconnect consumer c.Shutdown() c2, err := NewConsumer(serverURL, "foo.bar.test", "direct", "foo", "foo", "foo-test1", func(d wabbit.Delivery) { buf.Write(d.Body()) log.Printf("received '%s', buf length %d", d.Body(), buf.Len()) if buf.Len() == 6 { done <- true } }) if err != nil { t.Fatal(err) } submitter.Submit([]byte("E"), "foo", "text/plain") submitter.Submit([]byte("F"), "foo", "text/plain") // ... 
and wait until they are received and processed <-done log.Debug("All done") // check if order and length is correct log.Info(buf.String()) if buf.String() != "ABCDEF" { t.Fail() } // tear down test setup c2.Shutdown() fakeServer.Stop() } fever-1.0.8/util/testdata/000077500000000000000000000000001353566242200154255ustar00rootroot00000000000000fever-1.0.8/util/testdata/jsonparse_eve.json000066400000000000000000000025051353566242200211650ustar00rootroot00000000000000{"timestamp":"2017-03-06T06:54:06.047429+0000","flow_id":4711,"in_iface":"enp2s0f1","event_type":"dns","vlan":61,"src_ip":"10.0.0.10","src_port":53,"dest_ip":"10.0.0.11","dest_port":51323,"proto":"UDP","dns":{"type":"answer","id":1,"rcode":"NOERROR","rrname":"test.test.local","rrtype":"A","ttl":2365,"rdata":"10.0.0.12"}} {"timestamp":"2017-03-06T06:54:10.839668+0000","flow_id":2323,"in_iface":"enp2s0f1","event_type":"fileinfo","vlan":91,"src_ip":"10.0.0.10","src_port":80,"dest_ip":"10.0.0.11","dest_port":52914,"proto":"TCP","http":{"hostname":"api.icndb.com","url":"\/jokes\/random?firstName=Chuck&lastName=Norris&limitTo=[nerdy]","http_user_agent":"Ruby","http_content_type":"application\/json","http_method":"GET","protocol":"HTTP\/1.1","status":200,"length":178},"app_proto":"http","fileinfo":{"filename":"\/jokes\/random","magic":"ASCII text, with no line terminators","state":"CLOSED","md5":"8d81d793b28b098e8623d47bae23cf44","stored":false,"size":176,"tx_id":0}} {"timestamp":"2017-03-06T06:54:14.002504+0000","flow_id":2134,"in_iface":"enp2s0f1","event_type":"http","vlan":72,"src_ip":"10.0.0.10","src_port":24092,"dest_ip":"10.0.0.11","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"foobar","url":"\/scripts\/wpnbr.dll","http_content_type":"text\/xml","http_method":"POST","protocol":"HTTP\/1.1","status":200,"length":347}} 
fever-1.0.8/util/testdata/jsonparse_eve_broken1.json000066400000000000000000000013331353566242200226040ustar00rootroot00000000000000{"timestamp":"2017-03-06T06:54:06.047429+0000","flow_id":4711,"in_iface":"enp2s0f1","event_type":"dns","vlan":61,"src_ip":"10.0.0.10","src_port":53,"dest_ip":"10.0.0.11","dest_port":51323,"proto":"UDP","dns":{"type":"answer","id":1,"rcode":"NOERROR","rrname":"test.test.local","rrtype":"A","ttl":2365,"rdata":"10.0.0.12"}} {"timestamp":"2017-03-06T06:54:10 {"timestamp":"2017-03-06T06:54:14.002504+0000","flow_id":2134,"in_iface":"enp2s0f1","event_type":"http","vlan":72,"src_ip":"10.0.0.10","src_port":24092,"dest_ip":"10.0.0.11","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"foobar","url":"\/scripts\/wpnbr.dll","http_content_type":"text\/xml","http_method":"POST","protocol":"HTTP\/1.1","status":200,"length":347}} fever-1.0.8/util/testdata/jsonparse_eve_empty.json000066400000000000000000000000001353566242200223670ustar00rootroot00000000000000fever-1.0.8/util/util.go000066400000000000000000000144071353566242200151260ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, 2018, DCSO GmbH import ( "io/ioutil" "math/rand" "os" "strings" "time" "github.com/DCSO/fever/types" "github.com/buger/jsonparser" ) // ToolName is a string containing the name of this software, lowercase. var ToolName = "fever" // ToolNameUpper is a string containing the name of this software, uppercase. 
var ToolNameUpper = "FEVER" var evekeys = [][]string{ []string{"event_type"}, // 0 []string{"src_ip"}, // 1 []string{"src_port"}, // 2 []string{"dest_ip"}, // 3 []string{"dest_port"}, // 4 []string{"timestamp"}, // 5 []string{"proto"}, // 6 []string{"flow", "bytes_toclient"}, // 7 []string{"flow", "bytes_toserver"}, // 8 []string{"http", "hostname"}, // 9 []string{"http", "url"}, // 10 []string{"http", "http_method"}, // 11 []string{"dns", "rrname"}, // 12 []string{"flow", "pkts_toclient"}, // 13 []string{"flow", "pkts_toserver"}, // 14 []string{"dns", "rcode"}, // 15 []string{"dns", "rdata"}, // 16 []string{"dns", "rrtype"}, // 17 []string{"dns", "type"}, // 18 []string{"tls", "sni"}, // 19 []string{"dns", "version"}, // 20 []string{"dns", "answers"}, // 21 []string{"flow_id"}, // 22 } // ParseJSON extracts relevant fields from an EVE JSON entry into an Entry struct. func ParseJSON(json []byte) (e types.Entry, parseerr error) { e = types.Entry{} jsonparser.EachKey(json, func(idx int, value []byte, vt jsonparser.ValueType, err error) { if parseerr != nil { return } if err != nil { parseerr = err return } switch idx { case 0: e.EventType, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 1: e.SrcIP = string(value[:]) case 2: e.SrcPort, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 3: e.DestIP = string(value[:]) case 4: e.DestPort, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 5: e.Timestamp = string(value[:]) case 6: e.Proto = string(value[:]) case 7: e.BytesToClient, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 8: e.BytesToServer, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 9: e.HTTPHost, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 10: e.HTTPUrl, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 11: e.HTTPMethod, err = 
jsonparser.ParseString(value) if err != nil { parseerr = err return } case 12: e.DNSRRName, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 13: e.PktsToClient, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 14: e.PktsToServer, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 15: e.DNSRCode, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 16: e.DNSRData, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 17: e.DNSRRType, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 18: e.DNSType, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 19: e.TLSSni, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } case 20: e.DNSVersion, err = jsonparser.ParseInt(value) if err != nil { parseerr = err return } case 21: if e.DNSVersion == 2 { e.DNSAnswers = make([]types.DNSAnswer, 0) jsonparser.ArrayEach(value, func(mvalue []byte, dataType jsonparser.ValueType, offset int, err error) { var rrname, rdata, rrtype string var merr error if parseerr != nil { return } if err != nil { parseerr = err return } rdata, merr = jsonparser.GetString(mvalue, "rdata") if merr != nil { if merr != jsonparser.KeyPathNotFoundError { parseerr = merr return } } rrname, merr = jsonparser.GetString(mvalue, "rrname") if merr != nil { parseerr = merr return } rrtype, merr = jsonparser.GetString(mvalue, "rrtype") if merr != nil { parseerr = merr return } dnsa := types.DNSAnswer{ DNSRCode: e.DNSRCode, DNSRData: rdata, DNSRRName: rrname, DNSRRType: rrtype, } e.DNSAnswers = append(e.DNSAnswers, dnsa) }) } if err != nil { parseerr = err return } case 22: e.FlowID, err = jsonparser.ParseString(value) if err != nil { parseerr = err return } } }, evekeys...) 
e.JSONLine = string(json) return e, parseerr } // GetSensorID returns the machine ID of the system it is being run on, or // the string """ if the ID cannot be determined. func GetSensorID() (string, error) { if _, err := os.Stat("/etc/machine-id"); os.IsNotExist(err) { return "", nil } b, err := ioutil.ReadFile("/etc/machine-id") if err != nil { return "", nil } return strings.TrimSpace(string(b)), nil } var src = rand.NewSource(time.Now().UnixNano()) // RandStringBytesMaskImprSrc returns a random string of a given length. func RandStringBytesMaskImprSrc(n int) string { letterBytes := "abcdefghijk" letterIdxBits := uint(6) // 6 bits to represent a letter index letterIdxMask := int64(1<= 0; { if remain == 0 { cache, remain = src.Int63(), letterIdxMax } if idx := int(cache & letterIdxMask); idx < len(letterBytes) { b[i] = letterBytes[idx] i-- } cache >>= letterIdxBits remain-- } return string(b) } fever-1.0.8/util/util_test.go000066400000000000000000000077051353566242200161700ustar00rootroot00000000000000package util // DCSO FEVER // Copyright (c) 2017, DCSO GmbH import ( "bufio" "os" "reflect" "testing" "github.com/DCSO/fever/types" ) var entries = []types.Entry{ types.Entry{ SrcIP: "10.0.0.10", SrcPort: 53, DestIP: "10.0.0.11", DestPort: 51323, Timestamp: "2017-03-06T06:54:06.047429+0000", EventType: "dns", Proto: "UDP", JSONLine: `{"timestamp":"2017-03-06T06:54:06.047429+0000","flow_id":4711,"in_iface":"enp2s0f1","event_type":"dns","vlan":61,"src_ip":"10.0.0.10","src_port":53,"dest_ip":"10.0.0.11","dest_port":51323,"proto":"UDP","dns":{"type":"answer","id":1,"rcode":"NOERROR","rrname":"test.test.local","rrtype":"A","ttl":2365,"rdata":"10.0.0.12"}}`, DNSRRName: "test.test.local", DNSRRType: "A", DNSRCode: "NOERROR", DNSRData: "10.0.0.12", DNSType: "answer", FlowID: "4711", }, types.Entry{ SrcIP: "10.0.0.10", SrcPort: 80, DestIP: "10.0.0.11", DestPort: 52914, Timestamp: "2017-03-06T06:54:10.839668+0000", EventType: "fileinfo", Proto: "TCP", JSONLine: 
`{"timestamp":"2017-03-06T06:54:10.839668+0000","flow_id":2323,"in_iface":"enp2s0f1","event_type":"fileinfo","vlan":91,"src_ip":"10.0.0.10","src_port":80,"dest_ip":"10.0.0.11","dest_port":52914,"proto":"TCP","http":{"hostname":"api.icndb.com","url":"\/jokes\/random?firstName=Chuck&lastName=Norris&limitTo=[nerdy]","http_user_agent":"Ruby","http_content_type":"application\/json","http_method":"GET","protocol":"HTTP\/1.1","status":200,"length":178},"app_proto":"http","fileinfo":{"filename":"\/jokes\/random","magic":"ASCII text, with no line terminators","state":"CLOSED","md5":"8d81d793b28b098e8623d47bae23cf44","stored":false,"size":176,"tx_id":0}}`, HTTPHost: "api.icndb.com", HTTPUrl: `/jokes/random?firstName=Chuck&lastName=Norris&limitTo=[nerdy]`, HTTPMethod: `GET`, FlowID: "2323", }, types.Entry{ SrcIP: "10.0.0.10", SrcPort: 24092, DestIP: "10.0.0.11", DestPort: 80, Timestamp: "2017-03-06T06:54:14.002504+0000", EventType: "http", Proto: "TCP", JSONLine: `{"timestamp":"2017-03-06T06:54:14.002504+0000","flow_id":2134,"in_iface":"enp2s0f1","event_type":"http","vlan":72,"src_ip":"10.0.0.10","src_port":24092,"dest_ip":"10.0.0.11","dest_port":80,"proto":"TCP","tx_id":0,"http":{"hostname":"foobar","url":"\/scripts\/wpnbr.dll","http_content_type":"text\/xml","http_method":"POST","protocol":"HTTP\/1.1","status":200,"length":347}}`, HTTPHost: "foobar", HTTPUrl: `/scripts/wpnbr.dll`, HTTPMethod: `POST`, FlowID: "2134", }, } func TestJSONParseEVE(t *testing.T) { f, err := os.Open("testdata/jsonparse_eve.json") if err != nil { t.Fatalf(err.Error()) } scanner := bufio.NewScanner(f) i := 0 for scanner.Scan() { json := scanner.Bytes() e, err := ParseJSON(json) if err != nil { t.Fatalf(err.Error()) } if !reflect.DeepEqual(entries[i], e) { t.Fatalf("entry %d parsed from JSON does not match expected value", i) } i++ } } func TestJSONParseEVEBroken(t *testing.T) { f, err := os.Open("testdata/jsonparse_eve_broken1.json") if err != nil { t.Fatalf(err.Error()) } scanner := 
bufio.NewScanner(f) i := 0 for scanner.Scan() { json := scanner.Bytes() e, err := ParseJSON(json) if i != 1 { if err != nil { t.Fatalf(err.Error()) } } if i == 1 { if err == nil { t.Fatalf("broken JSON line should raise an error") } } if i != 1 { if !reflect.DeepEqual(entries[i], e) { t.Fatalf("entry %d parsed from JSON does not match expected value", i) } } i++ } } func TestJSONParseEVEempty(t *testing.T) { f, err := os.Open("testdata/jsonparse_eve_empty.json") if err != nil { t.Fatalf(err.Error()) } scanner := bufio.NewScanner(f) i := 0 for scanner.Scan() { i++ } if i > 0 { t.Fatal("empty file should not generate any entries") } } func TestGetSensorID(t *testing.T) { sid, err := GetSensorID() if err != nil { t.Fatalf(err.Error()) } if len(sid) == 0 { t.Fatal("missing sensor ID") } }