diff --git a/.circleci/check-gofmt b/.circleci/check-gofmt new file mode 100755 index 0000000..b9c4731 --- /dev/null +++ b/.circleci/check-gofmt @@ -0,0 +1,10 @@ +#!/bin/bash + +result="$(gofmt -e -s -l . 2>&1 | grep -v '^vendor/' )" +if [ -n "$result" ]; then + echo "Go code is not formatted, run 'gofmt -e -s -w .'" >&2 + echo "$result" + exit 1 +else + echo "Go code is formatted well" +fi diff --git a/.circleci/config.yml b/.circleci/config.yml new file mode 100644 index 0000000..5beec60 --- /dev/null +++ b/.circleci/config.yml @@ -0,0 +1,37 @@ +# Golang CircleCI 2.0 configuration file +# +# Check https://circleci.com/docs/2.0/language-go/ for more details +version: 2 +jobs: + build: + docker: + - image: circleci/golang:1.13 + steps: + - checkout + + # run tests and report coverage + - run: go test -v -cover -race -coverprofile=coverage.txt ./... + - run: bash <(curl -s https://codecov.io/bash) + + # build binary + - run: go install github.com/bio-routing/tflow2 + + checks: + docker: + - image: circleci/golang:1.12 + steps: + - checkout + + # check goftm + - run: .circleci/check-gofmt + + # check misspell + - run: go get github.com/client9/misspell/cmd/misspell + - run: misspell -error . + +workflows: + version: 2 + workflow: + jobs: + - checks + - build diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..850de5e --- /dev/null +++ b/.gitignore @@ -0,0 +1,3 @@ +tflow2 +/vendor +config.yml diff --git a/AUTHORS b/AUTHORS index 813d69a..6f97c6e 100644 --- a/AUTHORS +++ b/AUTHORS @@ -11,3 +11,7 @@ # Please keep the list sorted. Google Inc. +EXARING AG +Oliver Herms +Julian Kornberger +Cedric Kienzler diff --git a/Gopkg.lock b/Gopkg.lock new file mode 100644 index 0000000..abd16ac --- /dev/null +++ b/Gopkg.lock @@ -0,0 +1,191 @@ +# This file is autogenerated, do not edit; changes may be undone by the next 'dep ensure'. 
+ + +[[projects]] + digest = "1:ffe9824d294da03b391f44e1ae8281281b4afc1bdaa9588c9097785e3af10cec" + name = "github.com/davecgh/go-spew" + packages = ["spew"] + pruneopts = "UT" + revision = "8991bc29aa16c548c550c7ff78260e27b9ab7c73" + version = "v1.1.1" + +[[projects]] + branch = "master" + digest = "1:1ba1d79f2810270045c328ae5d674321db34e3aae468eb4233883b473c5c0467" + name = "github.com/golang/glog" + packages = ["."] + pruneopts = "UT" + revision = "23def4e6c14b4da8ac2ed8007337bc5eb5007998" + +[[projects]] + digest = "1:b60efdeb75d3c0ceed88783ac2495256aba3491a537d0f31401202579fd62a94" + name = "github.com/golang/mock" + packages = ["gomock"] + pruneopts = "UT" + revision = "51421b967af1f557f93a59e0057aaf15ca02e29c" + version = "v1.2.0" + +[[projects]] + digest = "1:4c0989ca0bcd10799064318923b9bc2db6b4d6338dd75f3f2d86c3511aaaf5cf" + name = "github.com/golang/protobuf" + packages = [ + "proto", + "ptypes", + "ptypes/any", + "ptypes/duration", + "ptypes/timestamp", + ] + pruneopts = "UT" + revision = "aa810b61a9c79d51363740d207bb46cf8e620ed5" + version = "v1.2.0" + +[[projects]] + digest = "1:40e195917a951a8bf867cd05de2a46aaf1806c50cf92eebf4c16f78cd196f747" + name = "github.com/pkg/errors" + packages = ["."] + pruneopts = "UT" + revision = "645ef00459ed84a119197bfb8d8205042c6df63d" + version = "v0.8.0" + +[[projects]] + digest = "1:0028cb19b2e4c3112225cd871870f2d9cf49b9b4276531f03438a88e94be86fe" + name = "github.com/pmezard/go-difflib" + packages = ["difflib"] + pruneopts = "UT" + revision = "792786c7400a136282c1664665ae0a8db921c6c2" + version = "v1.0.0" + +[[projects]] + digest = "1:98e86b9bb8fbb9053ba2b213c69cfc4753335e53d842178cebea6526a60571f3" + name = "github.com/soniah/gosnmp" + packages = ["."] + pruneopts = "UT" + revision = "28507a583d6f323959b554e0fc6d33eb589f5c05" + version = "v1.16" + +[[projects]] + digest = "1:18752d0b95816a1b777505a97f71c7467a8445b8ffb55631a7bf779f6ba4fa83" + name = "github.com/stretchr/testify" + packages = ["assert"] + pruneopts = 
"UT" + revision = "f35b8ab0b5a2cef36673838d662e249dd9c94686" + version = "v1.2.2" + +[[projects]] + branch = "master" + digest = "1:89a0cb976397aa9157a45bb2b896d0bcd07ee095ac975e0f03c53250c402265e" + name = "golang.org/x/net" + packages = [ + "context", + "http/httpguts", + "http2", + "http2/hpack", + "idna", + "internal/timeseries", + "trace", + ] + pruneopts = "UT" + revision = "927f97764cc334a6575f4b7a1584a147864d5723" + +[[projects]] + branch = "master" + digest = "1:5b7594f7c43cb5fc8f31c3f4a9f6a61ee798dbd5fcb36b70cb3d4780aad4c509" + name = "golang.org/x/sys" + packages = ["unix"] + pruneopts = "UT" + revision = "9a3f9b0469bbc6b8802087ae5c0af9f61502de01" + +[[projects]] + digest = "1:a2ab62866c75542dd18d2b069fec854577a20211d7c0ea6ae746072a1dccdd18" + name = "golang.org/x/text" + packages = [ + "collate", + "collate/build", + "internal/colltab", + "internal/gen", + "internal/tag", + "internal/triegen", + "internal/ucd", + "language", + "secure/bidirule", + "transform", + "unicode/bidi", + "unicode/cldr", + "unicode/norm", + "unicode/rangetable", + ] + pruneopts = "UT" + revision = "f21a4dfb5e38f5895301dc265a8def02365cc3d0" + version = "v0.3.0" + +[[projects]] + branch = "master" + digest = "1:077c1c599507b3b3e9156d17d36e1e61928ee9b53a5b420f10f28ebd4a0b275c" + name = "google.golang.org/genproto" + packages = ["googleapis/rpc/status"] + pruneopts = "UT" + revision = "bd9b4fb69e2ffd37621a6caa54dcbead29b546f2" + +[[projects]] + digest = "1:9edd250a3c46675d0679d87540b30c9ed253b19bd1fd1af08f4f5fb3c79fc487" + name = "google.golang.org/grpc" + packages = [ + ".", + "balancer", + "balancer/base", + "balancer/roundrobin", + "binarylog/grpc_binarylog_v1", + "codes", + "connectivity", + "credentials", + "credentials/internal", + "encoding", + "encoding/proto", + "grpclog", + "internal", + "internal/backoff", + "internal/binarylog", + "internal/channelz", + "internal/envconfig", + "internal/grpcrand", + "internal/grpcsync", + "internal/syscall", + "internal/transport", + 
"keepalive", + "metadata", + "naming", + "peer", + "resolver", + "resolver/dns", + "resolver/passthrough", + "stats", + "status", + "tap", + ] + pruneopts = "UT" + revision = "df014850f6dee74ba2fc94874043a9f3f75fbfd8" + version = "v1.17.0" + +[[projects]] + digest = "1:4d2e5a73dc1500038e504a8d78b986630e3626dc027bc030ba5c75da257cdb96" + name = "gopkg.in/yaml.v2" + packages = ["."] + pruneopts = "UT" + revision = "51d6538a90f86fe93ac480b35f37b2be17fef232" + version = "v2.2.2" + +[solve-meta] + analyzer-name = "dep" + analyzer-version = 1 + input-imports = [ + "github.com/golang/glog", + "github.com/golang/protobuf/proto", + "github.com/pkg/errors", + "github.com/soniah/gosnmp", + "github.com/stretchr/testify/assert", + "golang.org/x/net/context", + "google.golang.org/grpc", + "gopkg.in/yaml.v2", + ] + solver-name = "gps-cdcl" + solver-version = 1 diff --git a/Gopkg.toml b/Gopkg.toml new file mode 100644 index 0000000..9bc0be9 --- /dev/null +++ b/Gopkg.toml @@ -0,0 +1,34 @@ +# Refer to https://golang.github.io/dep/docs/Gopkg.toml.html +# for detailed Gopkg.toml documentation. + +[[constraint]] + branch = "master" + name = "github.com/golang/glog" + +[[constraint]] + name = "github.com/golang/protobuf" + version = "1.x" + +[[constraint]] + name = "github.com/soniah/gosnmp" + version = "1.x" + +[[constraint]] + name = "github.com/stretchr/testify" + version = "1.x" + +[[constraint]] + branch = "master" + name = "golang.org/x/net" + +[[constraint]] + name = "google.golang.org/grpc" + version = "1.x" + +[[constraint]] + name = "gopkg.in/yaml.v2" + version = "2.x" + +[prune] + go-tests = true + unused-packages = true diff --git a/README.md b/README.md index 7f95115..fde5b52 100644 --- a/README.md +++ b/README.md @@ -1,139 +1,91 @@ # tflow2 -tflow2 is an in memory netflow version 9 and IPFIX analyzer. -It is designed for fast arbitrary queries. 
+[![CircleCI](https://circleci.com/gh/bio-routing/tflow2/tree/master.svg?style=shield)](https://circleci.com/gh/bio-routing/tflow2/tree/master) +[![Codecov](https://codecov.io/gh/bio-routing/tflow2/branch/master/graph/badge.svg)](https://codecov.io/gh/bio-routing/tflow2) +[![Go ReportCard](http://goreportcard.com/badge/bio-routing/tflow2)](http://goreportcard.com/report/bio-routing/tflow2) -*This software is currently not maintained in this repo. Check out -https://github.com/taktv6/tflow2* +tflow2 is an in memory netflow version 9, IPFIX and Sflow analyzer. +It is designed for fast arbitrary queries and exports data to [Prometheus](https://prometheus.io/). ## Usage -Quick install with `go get -u github.com/google/tflow2` -and `go build github.com/google/tflow2` +Quick install with `go get -u github.com/bio-routing/tflow2` +and `go build github.com/bio-routing/tflow2` or download a pre-built binary from the -[releases page](https://github.com/google/tflow2/releases). +[releases page](https://github.com/bio-routing/tflow2/releases). The release binaries have an additional command, `tflow2 -version`, which reports the release version. Once you start the main binary it will start reading netflow version 9 packets -on port 2055 UDP and IPFIX packets on port 4739 on all interfaces. -For user interaction it starts a webserver on port 4444 TCP on all interfaces. +on port `2055` UDP and IPFIX packets on port `4739` on all interfaces. +For user interaction it starts a webserver on port `4444` TCP on all interfaces. The webinterface allows you to run queries against the collected data. Start time and router are mandatory criteria. If you don't provide any of these you will always receive an empty result. 
-### Command line arguments --aggregation=int - - This is the time window in seconds used for aggregation of flows - --alsologtostderr - - Will send logs to stderr on top - --anonymize=bool - - If set to true IP addresses will be replaced with NULL before dumping - flows to disk. Default is false. +### Config file --bgp=bool +There is YAML file as config. Defaults can be found in `config-example.yml`. +You'll at least need to add your Netflow/IPFIX/Sflow agents and adjust (if you don't +want to work with interface IDs) your SNMP RO community. - tflow will connect to BIRD and BIRD6 unix domain sockets to augment flows - with prefix and autonomous system information. This is useful in case your - routers exported netflow data is lacking these. This is the case for example - if you use the ipt-NETFLOW on Linux. - - BIRD needs a BGP session to each router that is emitting flow packets. - The protocol needs to be named like this: "nf_x_y_z_a" with x_y_z_a being the - source IP address of flow packets, e.g. nf_185_66_194_0 - --birdSock=path - - This is the path to the unix domain socket to talk to BIRD +### Command line arguments --birdSock6=path +`-alsologtostderr` - This is the path to the unix domain socket to talk to BIRD6 + Will send logs to stderr on top. --channelBuffer=int +`-channelBuffer=int` This is the amount of elements that any channel within the program can buffer. --dbaddworkers=int +`-dbaddworkers=int` This is the amount of workers that are used to add flows into the in memory database. --debug=int - - Debug level. 1 will give you some more information. 2 is not in use at - the moment. 3 will dump every single received netflow packet on the screen. +`-log_backtrace_at` --log_backtrace_at + when logging hits line file:N, emit a stack trace (default :0). - when logging hits line file:N, emit a stack trace (default :0) +`-log_dir` --log_dir + If non-empty, write log files in this directory. 
- If non-empty, write log files in this directory +`-logtostderr` --logtostderr + log to standard error instead of files. - log to standard error instead of files - --maxage=int - - Maximum age of flow data to keep in memory. Choose this parameter wisely or you - will run out of memory. Experience shows that 500k flows need about 50G of RAM. - --netflow=addr - - Address to use to receive netflow packets (default ":2055") via UDP - --ipfix=addr - - Address to use to receive IPFIX packets (default ":4739") via UDP - ---protonums=path - - CSV file to read protocol definitions from (default "protocol_numbers.csv"). - This is needed for suggestions in the web interface. - --samplerate=int +`-samplerate=int` Samplerate of your routers. This is used to deviate real packet and volume rates in case you use sampling. --sockreaders=int - - Num of go routines reading and parsing netflow packets (default 24) - --stderrthreshold +`-sockreaders=int` - logs at or above this threshold go to stderr + Num of go routines reading and parsing netflow packets (default 24). --v value +`-stderrthreshold` - log level for V logs + logs at or above this threshold go to stderr. --vmodule value +`-v value` - comma-separated list of pattern=N settings for file-filtered logging + log level for V logs. --web=addr +`-vmodule value` - Address to use for web service (default ":4444") + comma-separated list of pattern=N settings for file-filtered logging. ## Limitations -This software currently only supports receiving netflow packets over IPv4. -Please be aware this software is not platform indipendent. It will only work +Please be aware this software is not platform independent. It will only work on little endian machines (such as x86) ## License -(c) Google, 2017. Licensed under [Apache-2](LICENSE) license. +(c) Google, EXARING, Oliver Herms, 2017. Licensed under [Apache-2](LICENSE) license. This is not an official Google product. 
diff --git a/annotator/annotator.go b/annotation/annotation.go similarity index 53% rename from annotator/annotator.go rename to annotation/annotation.go index 623800a..8f3bdb2 100644 --- a/annotator/annotator.go +++ b/annotation/annotation.go @@ -9,40 +9,38 @@ // See the License for the specific language governing permissions and // limitations under the License. -// Package annotator annotates flows with meta data from external sources -package annotator +// Package annotation annotates flows with meta data from external sources +package annotation import ( + "context" "sync/atomic" - "github.com/google/tflow2/annotator/bird" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/stats" + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/stats" + "google.golang.org/grpc" + + log "github.com/sirupsen/logrus" ) // Annotator represents an flow annotator type Annotator struct { - inputs []chan *netflow.Flow - output chan *netflow.Flow - aggregation int64 - numWorkers int - bgpAugment bool - birdAnnotator *bird.Annotator - debug int + inputs []chan *netflow.Flow + output chan *netflow.Flow + numWorkers int + bgpAugment bool + debug int + cfg *config.Config } // New creates a new `Annotator` instance -func New(inputs []chan *netflow.Flow, output chan *netflow.Flow, numWorkers int, aggregation int64, bgpAugment bool, birdSock string, birdSock6 string, debug int) *Annotator { +func New(inputs []chan *netflow.Flow, output chan *netflow.Flow, numWorkers int, cfg *config.Config) *Annotator { a := &Annotator{ - inputs: inputs, - output: output, - aggregation: aggregation, - numWorkers: numWorkers, - bgpAugment: bgpAugment, - debug: debug, - } - if bgpAugment { - a.birdAnnotator = bird.NewAnnotator(birdSock, birdSock6, debug) + inputs: inputs, + output: output, + numWorkers: numWorkers, + cfg: cfg, } a.Init() return a @@ -54,20 +52,38 @@ func (a *Annotator) Init() { for _, ch := range a.inputs { for i 
:= 0; i < a.numWorkers; i++ { go func(ch chan *netflow.Flow) { + clients := make([]netflow.AnnotatorClient, 0) + for _, an := range a.cfg.Annotators { + var opts []grpc.DialOption + opts = append(opts, grpc.WithInsecure()) + log.Infof("Connecting to annotator %s at %s", an.Name, an.Target) + conn, err := grpc.Dial(an.Target, opts...) + if err != nil { + log.Errorf("Failed to dial: %v", err) + } + + clients = append(clients, netflow.NewAnnotatorClient(conn)) + } + for { // Read flow from netflow/IPFIX module fl := <-ch // Align timestamp on `aggrTime` raster - fl.Timestamp = fl.Timestamp - (fl.Timestamp % a.aggregation) + fl.Timestamp = fl.Timestamp - (fl.Timestamp % a.cfg.AggregationPeriod) // Update global statstics atomic.AddUint64(&stats.GlobalStats.FlowBytes, fl.Size) atomic.AddUint64(&stats.GlobalStats.FlowPackets, uint64(fl.Packets)) - // Annotate flows with ASN and Prefix information from local BIRD (bird.nic.cz) instance - if a.bgpAugment { - a.birdAnnotator.Augment(fl) + // Send flow to external annotators + for _, c := range clients { + tmpFlow, err := c.Annotate(context.Background(), fl) + if err != nil { + log.Errorf("Unable to annotate") + continue + } + fl = tmpFlow } // Send flow over to database module diff --git a/annotator/annotator_test.go b/annotation/annotation_test.go similarity index 71% rename from annotator/annotator_test.go rename to annotation/annotation_test.go index be1e7d8..4b156c6 100644 --- a/annotator/annotator_test.go +++ b/annotation/annotation_test.go @@ -9,19 +9,27 @@ // See the License for the specific language governing permissions and // limitations under the License. 
-package annotator +package annotation import ( "testing" - "github.com/google/tflow2/netflow" + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/netflow" ) func TestTimestampAggr(t *testing.T) { - ca := make(chan *netflow.Flow) - cb := make(chan *netflow.Flow) - var aggr int64 = 60 - go Init(ca, cb, aggr, false, 1) + outCh := make(chan *netflow.Flow) + nWorkers := 1 + + inCh := make([]chan *netflow.Flow, 0) + inCh = append(inCh, make(chan *netflow.Flow)) + + a := New(inCh, outCh, nWorkers, &config.Config{ + AggregationPeriod: 60, + BGPAugmentation: &config.BGPAugment{}, + }) + a.Init() testData := []struct { ts int64 @@ -42,8 +50,8 @@ func TestTimestampAggr(t *testing.T) { Timestamp: test.ts, } - ca <- fl - fl = <-cb + inCh[0] <- fl + fl = <-outCh if fl.Timestamp != test.want { t.Errorf("Input: %d, Got: %d, Expected: %d, ", test.ts, fl.Timestamp, test.want) } diff --git a/annotator/bird/bird.go b/annotator/bird/bird.go deleted file mode 100644 index bc015ad..0000000 --- a/annotator/bird/bird.go +++ /dev/null @@ -1,320 +0,0 @@ -// Copyright 2017 Google Inc. All Rights Reserved. -// Licensed under the Apache License, Version 2.0 (the "License"); -// you may not use this file except in compliance with the License. -// You may obtain a copy of the License at -// http://www.apache.org/licenses/LICENSE-2.0 -// Unless required by applicable law or agreed to in writing, software -// distributed under the License is distributed on an "AS IS" BASIS, -// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -// See the License for the specific language governing permissions and -// limitations under the License. - -// Package bird can lookup IP prefixes and autonomous system numbers and -// add them to flows in case the routers implementation doesn't support this, e.g. 
ipt-NETFLOW -package bird - -import ( - "fmt" - "net" - "strconv" - "strings" - "sync" - "sync/atomic" - - "github.com/golang/glog" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/stats" -) - -// QueryResult carries all useful information we extracted from a BIRD querys result -type QueryResult struct { - // Pfx is the prefix that is being used to forward packets for the IP - // address from the query - Pfx net.IPNet - - // As is the ASN that the subject IP is announced by - AS uint32 - - // NhAs is the ASN of the subject IPs associated Next Hop - NHAS uint32 -} - -// QueryCache represents a set of QueryResults that have been cached -type QueryCache struct { - cache map[string]QueryResult - lock sync.RWMutex -} - -// birdCon represents a connection to a BIRD instance -type birdCon struct { - sock string - con net.Conn - recon chan bool - lock sync.RWMutex -} - -// Annotator represents a BIRD based BGP annotator -type Annotator struct { - queryC chan string - resC chan *QueryResult - - // cache is used to cache query results - cache *QueryCache - - // connection to BIRD - bird4 *birdCon - - // connectio to BIRD6 - bird6 *birdCon - - // debug level - debug int -} - -// NewAnnotator creates a new BIRD annotator and get's service started -func NewAnnotator(sock string, sock6 string, debug int) *Annotator { - a := &Annotator{ - cache: newQueryCache(), - queryC: make(chan string), - resC: make(chan *QueryResult), - debug: debug, - } - - var wg sync.WaitGroup - - wg.Add(1) - go func() { - defer wg.Done() - a.bird4 = newBirdCon(sock) - }() - - wg.Add(1) - go func() { - defer wg.Done() - a.bird6 = newBirdCon(sock6) - }() - - wg.Wait() - go a.gateway() - - return a -} - -// getConn gets the net.Conn property of the BIRD connection -func (c *birdCon) getConn() *net.Conn { - return &c.con -} - -// newQueryCache creates and initializes a new `QueryCache` -func newQueryCache() *QueryCache { - return &QueryCache{cache: make(map[string]QueryResult)} -} - -// 
reconnector receives a signal via channel that triggers a connection attempt to BIRD -func (c *birdCon) reconnector() { - for { - // wait for signal of a closed connection - <-c.recon - - // try to connect up to 5 times - for i := 0; i < 5; i++ { - tmpCon, err := net.Dial("unix", c.sock) - if err != nil { - glog.Warningf("Unable to connect to BIRD on %s: %v", c.sock, err) - continue - } - - // Read welcome message we are not interested in - buf := make([]byte, 1024) - nbytes, err := tmpCon.Read(buf[:]) - if err != nil || nbytes == 0 { - if err == nil { - tmpCon.Close() - } - glog.Warning("Reading from BIRD failed: %v", err) - continue - } - - c.lock.Lock() - c.con = tmpCon - c.lock.Unlock() - break - } - } -} - -// Get tries to receive an entry from QueryCache `qc` -func (qc *QueryCache) Get(addr []byte) *QueryResult { - qc.lock.RLock() - defer qc.lock.RUnlock() - - res, ok := qc.cache[net.IP(addr).String()] - if !ok { - atomic.AddUint64(&stats.GlobalStats.BirdCacheMiss, 1) - return nil - } - atomic.AddUint64(&stats.GlobalStats.BirdCacheHits, 1) - return &res -} - -// Set sets data for `addr` in QueryCache `qc` to `qres` -func (qc *QueryCache) Set(addr []byte, qres *QueryResult) { - qc.lock.Lock() - defer qc.lock.Unlock() - qc.cache[net.IP(addr).String()] = *qres -} - -// newBirdCon creates a birdCon to socket `s` -func newBirdCon(s string) *birdCon { - b := &birdCon{ - sock: s, - recon: make(chan bool), - } - go b.reconnector() - b.recon <- true - return b -} - -// Augment function provides the main interface to the external world to consume service of this module -func (a *Annotator) Augment(fl *netflow.Flow) { - srcRes := a.cache.Get(fl.SrcAddr) - if srcRes == nil { - srcRes = a.query(net.IP(fl.Router), fl.SrcAddr) - a.cache.Set(fl.SrcAddr, srcRes) - } - - dstRes := a.cache.Get(fl.DstAddr) - if dstRes == nil { - dstRes = a.query(net.IP(fl.Router), fl.DstAddr) - a.cache.Set(fl.DstAddr, dstRes) - } - - fl.SrcPfx = &netflow.Pfx{} - fl.SrcPfx.IP = srcRes.Pfx.IP - 
fl.SrcPfx.Mask = srcRes.Pfx.Mask - - fl.DstPfx = &netflow.Pfx{} - fl.DstPfx.IP = dstRes.Pfx.IP - fl.DstPfx.Mask = dstRes.Pfx.Mask - - fl.SrcAs = srcRes.AS - fl.DstAs = dstRes.AS - fl.NextHopAs = dstRes.NHAS -} - -// query forms a query, sends it to the processing engine, reads the result and returns it -func (a *Annotator) query(rtr net.IP, addr net.IP) *QueryResult { - query := fmt.Sprintf("show route all for %s protocol nf_%s\n", addr.String(), strings.Replace(rtr.String(), ".", "_", -1)) - a.queryC <- query - return <-a.resC -} - -// gateway starts the main service routine -func (a *Annotator) gateway() { - query := "" - - buf := make([]byte, 1024) - for { - var res QueryResult - query = <-a.queryC - if query == "" { - continue - } - data := []byte(query) - - // Determine if we are being queried for an IPv4 or an IPv6 address - bird := a.bird4 - if strings.Contains(query, ":") { - bird = a.bird6 - } - - // Skip annotation if we're not connected to bird yet - bird.lock.RLock() - if bird.con == nil { - glog.Warningf("skipped annotating flow: BIRD is not connected yet") - bird.lock.RUnlock() - a.resC <- &res - continue - } - - // Send query to BIRD - _, err := bird.con.Write(data) - if err != nil { - bird.lock.RUnlock() - glog.Errorf("Unable to write to BIRD: %v", err) - bird.recon <- true - continue - } - bird.lock.RUnlock() - - // Read reply from BIRD - n, err := bird.con.Read(buf[:]) - if err != nil { - bird.lock.RUnlock() - glog.Errorf("unable to read from BIRD: %v", err) - bird.recon <- true - continue - } - - // Parse BIRDs output - output := string(buf[:n]) - lines := strings.Split(output, "\n") - for i, line := range lines { - // Take the first line as that should contain the prefix - if i == 0 { - parts := strings.Split(line, " ") - if len(parts) == 0 { - glog.Warningf("unexpected empty output for query '%v'", query) - continue - } - pfx := parts[0] - parts = strings.Split(pfx, "-") - if len(parts) != 2 { - glog.Warningf("unexpected split results for query 
'%v'", query) - continue - } - pfx = parts[1] - - _, tmpNet, err := net.ParseCIDR(pfx) - res.Pfx = *tmpNet - if err != nil { - glog.Warningf("unable to parse CIDR from BIRD: %v (query '%v')", err, query) - continue - } - continue - } - - // Find line that contains the AS Path - if strings.Contains(line, "BGP.as_path: ") { - // Remove curly braces from BIRD AS path (ignores aggregators), e.g. BGP.as_path: 25291 3320 20940 { 16625 } - line = strings.Replace(line, "{ ", "", -1) - line = strings.Replace(line, " }", "", -1) - - parts := strings.Split(line, "BGP.as_path: ") - pathParts := strings.Split(parts[1], " ") - - if len(parts) < 2 || parts[1] == "" { - break - } - - AS, err := strconv.ParseUint(pathParts[len(pathParts)-1], 10, 32) - if err != nil { - glog.Warningf("unable to parse ASN") - } - - NHAS, err := strconv.ParseUint(pathParts[0], 10, 32) - if err != nil { - glog.Warningf("unable to parse next hop ASN") - } - - res.AS = uint32(AS) - res.NHAS = uint32(NHAS) - break - } - } - if res.AS == 0 && a.debug > 2{ - glog.Warningf("unable to find AS path for '%v'", query) - } - a.resC <- &res - } -} diff --git a/annotators/ris-annotator/main.go b/annotators/ris-annotator/main.go new file mode 100644 index 0000000..78694e6 --- /dev/null +++ b/annotators/ris-annotator/main.go @@ -0,0 +1,83 @@ +package main + +import ( + "flag" + "fmt" + "math" + "os" + "strconv" + "strings" + + "github.com/bio-routing/bio-rd/util/servicewrapper" + "github.com/bio-routing/tflow2/annotators/ris-annotator/server" + "github.com/bio-routing/tflow2/netflow" + "google.golang.org/grpc" + + ris "github.com/bio-routing/bio-rd/cmd/ris/api" + log "github.com/sirupsen/logrus" +) + +var ( + grpcPort = flag.Uint("grpc_port", 5432, "gRPC server port") + httpPort = flag.Uint("http_port", 5431, "HTTP server port") + risServer = flag.String("ris", "localhost:4321", "RIS gRPC server") + vrf = flag.String("vrf", "", "VRF") +) + +func main() { + flag.Parse() + + vrfID, err := parseVRF(*vrf) + if err != nil 
{ + log.Errorf("Unable to parse VRF: %v", err) + os.Exit(1) + } + + c, err := grpc.Dial(*risServer, grpc.WithInsecure()) + if err != nil { + log.Errorf("grpc.Dial failed: %v", err) + os.Exit(1) + } + defer c.Close() + + s := server.New(ris.NewRoutingInformationServiceClient(c), vrfID) + interceptors := []grpc.UnaryServerInterceptor{} + srv, err := servicewrapper.New( + uint16(*grpcPort), + servicewrapper.HTTP(uint16(*httpPort)), + interceptors, + nil, + ) + if err != nil { + log.Errorf("failed to listen: %v", err) + os.Exit(1) + } + + netflow.RegisterAnnotatorServer(srv.GRPC(), s) + if err := srv.Serve(); err != nil { + log.Fatalf("failed to start server: %v", err) + } +} + +func parseVRF(v string) (uint64, error) { + if v == "" { + return 0, nil + } + + parts := strings.Split(v, ":") + if len(parts) != 2 { + return 0, fmt.Errorf("Invalid format: %q", v) + } + + asn, err := strconv.Atoi(parts[0]) + if err != nil { + return 0, err + } + + x, err := strconv.Atoi(parts[1]) + if err != nil { + return 0, err + } + + return uint64(asn)*uint64(math.Pow(2, 32)) + uint64(x), nil +} diff --git a/annotators/ris-annotator/server/server.go b/annotators/ris-annotator/server/server.go new file mode 100644 index 0000000..02fa719 --- /dev/null +++ b/annotators/ris-annotator/server/server.go @@ -0,0 +1,137 @@ +package server + +import ( + "context" + "encoding/json" + "fmt" + "net" + + "github.com/bio-routing/tflow2/netflow" + + ris "github.com/bio-routing/bio-rd/cmd/ris/api" + bnet "github.com/bio-routing/bio-rd/net" + routeapi "github.com/bio-routing/bio-rd/route/api" +) + +// Server implements a Netflow annotation server +type Server struct { + risClient ris.RoutingInformationServiceClient + vrfID uint64 +} + +// New creates a new server +func New(risClient ris.RoutingInformationServiceClient, vrfID uint64) *Server { + return &Server{ + risClient: risClient, + vrfID: vrfID, + } +} + +func getLastASN(route *routeapi.Route) uint32 { + if len(route.Paths) == 0 { + return 0 + } + + 
if route.Paths[0].BgpPath == nil { + return 0 + } + + x := uint32(0) + for _, seg := range route.Paths[0].BgpPath.AsPath { + if !seg.AsSequence { + continue + } + + x = seg.Asns[len(seg.Asns)-1] + } + + return x +} + +func getFirstASN(route *routeapi.Route) uint32 { + if len(route.Paths) == 0 { + return 0 + } + + if route.Paths[0].BgpPath == nil { + return 0 + } + + if len(route.Paths[0].BgpPath.AsPath) == 0 { + return 0 + } + + if !route.Paths[0].BgpPath.AsPath[0].AsSequence { + return 0 + } + + if len(route.Paths[0].BgpPath.AsPath[0].Asns) == 0 { + return 0 + } + + return route.Paths[0].BgpPath.AsPath[0].Asns[0] +} + +// Annotate annotates a flow +func (s *Server) Annotate(ctx context.Context, nf *netflow.Flow) (*netflow.Flow, error) { + destIP, err := bnet.IPFromBytes(nf.DstAddr) + if err != nil { + return nil, fmt.Errorf("Invalid IP: %v", nf.DstAddr) + } + req := &ris.LPMRequest{ + Router: net.IP(nf.Router).String(), + VrfId: s.vrfID, + Pfx: bnet.NewPfx(destIP, 32).ToProto(), + } + + res, err := s.risClient.LPM(ctx, req) + if err != nil { + jsonReq, _ := json.Marshal(req) + return nil, fmt.Errorf("LPM failed: %v (req: %q)", err, string(jsonReq)) + } + + if len(res.Routes) == 0 { + return nil, fmt.Errorf("Prefix not found (addr=%s, router=%s, vrf=%d)", destIP.String(), net.IP(nf.Router).String(), req.VrfId) + } + + n := bnet.NewPrefixFromProtoPrefix(*res.Routes[len(res.Routes)-1].Pfx).GetIPNet() + + nf.DstPfx = &netflow.Pfx{ + IP: n.IP, + Mask: n.Mask, + } + + nf.DstAs = getLastASN(res.Routes[len(res.Routes)-1]) + nf.NextHopAs = getFirstASN(res.Routes[len(res.Routes)-1]) + + srcIP, err := bnet.IPFromBytes(nf.SrcAddr) + if err != nil { + return nil, fmt.Errorf("Invalid IP: %v", nf.SrcAddr) + } + srcReq := &ris.LPMRequest{ + Router: net.IP(nf.Router).String(), + VrfId: s.vrfID, + Pfx: bnet.NewPfx(srcIP, 32).ToProto(), + } + + res, err = s.risClient.LPM(ctx, srcReq) + if err != nil { + jsonReq, _ := json.Marshal(req) + return nil, fmt.Errorf("LPM failed: %v (req: 
%q)", err, string(jsonReq)) + } + + if len(res.Routes) == 0 { + return nil, fmt.Errorf("Prefix not found (addr=%s, router=%s, vrf=%d)", destIP.String(), net.IP(nf.Router).String(), req.VrfId) + } + + n = bnet.NewPrefixFromProtoPrefix(*res.Routes[len(res.Routes)-1].Pfx).GetIPNet() + + nf.SrcPfx = &netflow.Pfx{ + IP: n.IP, + Mask: n.Mask, + } + + nf.SrcAs = getLastASN(res.Routes[len(res.Routes)-1]) + + return nf, nil +} diff --git a/avltree/avtltree.go b/avltree/avtltree.go index 4930419..233bd58 100644 --- a/avltree/avtltree.go +++ b/avltree/avtltree.go @@ -13,10 +13,10 @@ package avltree import ( - "fmt" "sync" - "github.com/golang/glog" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" ) // Comparable is an interface used to pass compare functions to this avltree @@ -37,7 +37,7 @@ type TreeNode struct { left *TreeNode right *TreeNode key interface{} - Value interface{} + Values []interface{} height int64 issmaller Comparable } @@ -60,10 +60,13 @@ func (root *TreeNode) getHeight() int64 { // TreeMinValueNode returns the node with the minimal key in the tree func (root *TreeNode) minValueNode() *TreeNode { - for root.left != nil { - return root.left.minValueNode() + if root.left == nil { + // If no left element is available, we are at the smallest key + // so we return ourself + return root } - return nil + + return root.left.minValueNode() } // search searches element with key `key` in tree with root `root` @@ -154,18 +157,20 @@ func (root *TreeNode) delete(key interface{}) *TreeNode { tmp := root.minValueNode() root.key = tmp.key - root.Value = tmp.Value + root.Values = tmp.Values root.right = root.right.delete(tmp.key) root.height = max(root.left.getHeight(), root.right.getHeight()) + 1 balance := root.getBalance() - if balance > 1 && root.left.getBalance() >= 0 { - return root.rightRotate() - } else if balance > 1 && root.left.getBalance() < 0 { + if balance > 1 { + if root.left.getBalance() >= 0 { + return root.rightRotate() + } return 
root.leftRightRotate() - } else if balance < -1 && root.right.getBalance() <= 0 { - return root.leftRotate() - } else if balance < -1 && root.right.getBalance() > 0 { + } else if balance < -1 { + if root.right.getBalance() <= 0 { + return root.leftRotate() + } return root.rightLeftRotate() } } else { @@ -175,7 +180,7 @@ func (root *TreeNode) delete(key interface{}) *TreeNode { return root } -// isEqual is a generic function that compares a and b of any comprable type +// isEqual is a generic function that compares a and b of any comparable type // return true if a and b are equal, otherwise false func isEqual(a interface{}, b interface{}) bool { return a == b @@ -189,7 +194,7 @@ func New() *Tree { // Insert inserts an element to tree with root `t` func (t *Tree) Insert(key interface{}, value interface{}, issmaller Comparable) (new *TreeNode, err error) { if t == nil { - return nil, fmt.Errorf("unable to insert into nil tree") + return nil, errors.Errorf("unable to insert into nil tree") } t.lock.Lock() defer t.lock.Unlock() @@ -202,10 +207,12 @@ func (t *Tree) Insert(key interface{}, value interface{}, issmaller Comparable) func (root *TreeNode) insert(key interface{}, value interface{}, issmaller Comparable) (*TreeNode, *TreeNode) { if root == nil { root = &TreeNode{ - left: nil, - right: nil, - key: key, - Value: value, + left: nil, + right: nil, + key: key, + Values: []interface{}{ + value, + }, height: 0, issmaller: issmaller, } @@ -213,6 +220,7 @@ func (root *TreeNode) insert(key interface{}, value interface{}, issmaller Compa } if isEqual(key, root.key) { + root.Values = append(root.Values, value) return root, root } @@ -320,7 +328,7 @@ func Intersection(candidates []*Tree) (res *Tree) { return } - glog.Infof("finding common elements in %d and %d elements", a.Count, b.Count) + log.Infof("finding common elements in %d and %d elements", a.Count, b.Count) chRes <- a.Intersection(b) }(chA[i], chB[i], chRet[i]) chA[i] <- candidates[i*2] @@ -415,7 +423,7 @@ func 
(root *TreeNode) dump() (res []interface{}) { tmp := root.left.dump() res = append(res, tmp...) } - res = append(res, root.Value) + res = append(res, root.Values...) if root.right != nil { tmp := root.right.dump() res = append(res, tmp...) @@ -440,18 +448,20 @@ func (root *TreeNode) topN(n int) (res []interface{}) { if root == nil { return res } + if root.right != nil { tmp := root.right.topN(n) for _, k := range tmp { if len(res) == n { return res } + res = append(res, k) } } if len(res) < n { - res = append(res, root.Value) + res = append(res, root.Values...) } if len(res) == n { diff --git a/config-example.yml b/config-example.yml new file mode 100644 index 0000000..801226a --- /dev/null +++ b/config-example.yml @@ -0,0 +1,39 @@ +--- +aggregation_period: 60 +default_snmp_community: "public" +debug: 0 +compression_level: 6 +data_dir: "data" +anonymize: false +cache_time: 1800 + +netflow_v9: + enabled: true + listen: ":2055" + +ipfix: + enabled: true + listen: ":4739" + +sflow: + enable: true + listen: ":6343" + +frontend: + enable: true + listen: ":4444" + +bgp_augmentation: + enabled: false + bird_socket: "/var/run/bird/bird.ctl" + bird6_socket: "/var/run/bird/bird6.ctl" + +annotators: +- name: "BGP Annotator" + target: "localhost:21222" + +agents: +- name: "bb01.fra01" + ip_address: "127.0.0.1" + snmp_community: "public" + samplerate: 1000 diff --git a/config/config.go b/config/config.go new file mode 100644 index 0000000..0fcb69f --- /dev/null +++ b/config/config.go @@ -0,0 +1,228 @@ +package config + +import ( + "io/ioutil" + + "github.com/pkg/errors" + yaml "gopkg.in/yaml.v2" +) + +// Config represents a yaml config file +type Config struct { + AggregationPeriod int64 `yaml:"aggregation_period"` + DefaultSNMPCommunity string `yaml:"default_snmp_community"` + InterfaceMapperRefreshPeriod int64 `yaml:"interface_mapper_refresh_period"` + Debug int `yaml:"debug"` + CompressionLevel *int `yaml:"compression_level"` + DataDir string `yaml:"data_dir"` + Anonymize 
bool `yaml:"anonymize"` + CacheTime *int64 `yaml:"cache_time"` + + NetflowV9 *Server `yaml:"netflow_v9"` + IPFIX *Server `yaml:"ipfix"` + Sflow *Server `yaml:"sflow"` + Frontend *Server `yaml:"frontend"` + BGPAugmentation *BGPAugment `yaml:"bgp_augmentation"` + Agents []Agent `yaml:"agents"` + Annotators []Annotator `yaml:"annotators"` + + AgentsNameByIP map[string]string +} + +// Annotator represents annotator configuration +type Annotator struct { + Name string + Target string +} + +// BGPAugment represents BGP augmentation configuration +type BGPAugment struct { + Enabled bool `yaml:"enabled"` + BIRDSocket string `yaml:"bird_socket"` + BIRD6Socket string `yaml:"bird6_socket"` +} + +// Server represents a server config +type Server struct { + Enabled *bool `yaml:"enabled"` + Listen string `yaml:"listen"` +} + +// Agent represents an agent config +type Agent struct { + Name string `yaml:"name"` + IPAddress string `yaml:"ip_address"` + SNMPCommunity string `yaml:"snmp_community"` + SampleRate uint64 `yaml:"sample_rate"` +} + +var ( + dfltAggregationPeriod = int64(60) + dftlIntfMapperRefreshPeriod = int64(30) + dfltDefaultSNMPCommunity = "public" + dfltSampleRate = uint64(1) + dfltCompressionLevel = 6 + dfltDataDir = "data" + dfltCacheTime = int64(1800) + + dfltNetflowV9Listen = ":2055" + dfltNetflowV9 = Server{ + Enabled: boolPtr(true), + Listen: dfltNetflowV9Listen, + } + + dfltServerEnabled = boolPtr(true) + + dfltIPFIXListen = ":4739" + dfltIPFIX = Server{ + Enabled: boolPtr(true), + Listen: dfltIPFIXListen, + } + + dfltSflowListen = ":6343" + dfltSflow = Server{ + Enabled: boolPtr(true), + Listen: dfltSflowListen, + } + + dfltFrontendListen = ":4444" + dfltFrontend = Server{ + Enabled: boolPtr(true), + Listen: dfltFrontendListen, + } + + dfltBIRDSocket = "/var/run/bird/bird.ctl" + dfltBIRD6Socket = "/var/run/bird/bird6.ctl" + dfltBGPAugmentation = BGPAugment{ + BIRDSocket: dfltBIRDSocket, + BIRD6Socket: dfltBIRD6Socket, + } +) + +// New reads a configuration 
file and returns a Config +func New(filename string) (*Config, error) { + cfgFile, err := ioutil.ReadFile(filename) + if err != nil { + return nil, errors.Wrapf(err, "Unable to read config file %s", filename) + } + + cfg := &Config{} + err = yaml.Unmarshal(cfgFile, cfg) + if err != nil { + return nil, errors.Wrap(err, "Unable to parse yaml file") + } + + cfg.defaults() + + cfg.AgentsNameByIP = make(map[string]string) + for _, agent := range cfg.Agents { + if _, ok := cfg.AgentsNameByIP[agent.IPAddress]; ok { + return nil, errors.Errorf("Duplicate agent: %s", agent.Name) + } + cfg.AgentsNameByIP[agent.IPAddress] = agent.Name + } + + return cfg, nil +} + +func (cfg *Config) defaults() { + if cfg.AggregationPeriod == 0 { + cfg.AggregationPeriod = dfltAggregationPeriod + } + if cfg.InterfaceMapperRefreshPeriod == 0 { + cfg.InterfaceMapperRefreshPeriod = dftlIntfMapperRefreshPeriod + } + if cfg.DefaultSNMPCommunity == "" { + cfg.DefaultSNMPCommunity = dfltDefaultSNMPCommunity + } + if cfg.CompressionLevel == nil { + cfg.CompressionLevel = intPtr(dfltCompressionLevel) + } + if cfg.DataDir == "" { + cfg.DataDir = dfltDataDir + } + if cfg.CacheTime == nil { + cfg.CacheTime = int64Ptr(dfltCacheTime) + } + + if cfg.NetflowV9 == nil { + cfg.NetflowV9 = srvPtr(dfltNetflowV9) + } + if cfg.NetflowV9.Listen == "" { + cfg.NetflowV9.Listen = dfltNetflowV9Listen + } + if cfg.NetflowV9.Enabled == nil { + cfg.NetflowV9.Enabled = dfltServerEnabled + } + + if cfg.IPFIX == nil { + cfg.IPFIX = srvPtr(dfltIPFIX) + } + if cfg.IPFIX.Listen == "" { + cfg.IPFIX.Listen = dfltIPFIXListen + } + if cfg.IPFIX.Enabled == nil { + cfg.IPFIX.Enabled = dfltServerEnabled + } + + if cfg.Sflow == nil { + cfg.Sflow = srvPtr(dfltSflow) + } + if cfg.Sflow.Listen == "" { + cfg.Sflow.Listen = dfltSflowListen + } + if cfg.Sflow.Enabled == nil { + cfg.Sflow.Enabled = dfltServerEnabled + } + + if cfg.Frontend == nil { + cfg.Frontend = srvPtr(dfltFrontend) + } + if cfg.Frontend.Listen == "" { + cfg.Frontend.Listen 
= dfltFrontendListen + } + if cfg.Frontend.Enabled == nil { + cfg.Frontend.Enabled = dfltServerEnabled + } + + if cfg.BGPAugmentation == nil { + cfg.BGPAugmentation = &dfltBGPAugmentation + } + if cfg.BGPAugmentation.BIRDSocket == "" { + cfg.BGPAugmentation.BIRDSocket = dfltBIRDSocket + } + if cfg.BGPAugmentation.BIRD6Socket == "" { + cfg.BGPAugmentation.BIRD6Socket = dfltBIRD6Socket + } + + if cfg.Agents != nil { + for key, agent := range cfg.Agents { + if agent.SNMPCommunity == "" { + cfg.Agents[key].SNMPCommunity = cfg.DefaultSNMPCommunity + } + if agent.SampleRate == 0 { + cfg.Agents[key].SampleRate = dfltSampleRate + } + } + } +} + +func uint64Ptr(x uint64) *uint64 { + return &x +} + +func srvPtr(srv Server) *Server { + return &srv +} + +func boolPtr(v bool) *bool { + return &v +} + +func intPtr(x int) *int { + return &x +} + +func int64Ptr(x int64) *int64 { + return &x +} diff --git a/convert/convert.go b/convert/convert.go index 6c66d19..c601796 100644 --- a/convert/convert.go +++ b/convert/convert.go @@ -20,7 +20,7 @@ import ( "strings" ) -// IPByteSlice converts a string that contians an IP address into byte slice +// IPByteSlice converts a string that contains an IP address into byte slice func IPByteSlice(ip string) []byte { ret := net.ParseIP(ip) if strings.Contains(ip, ".") { @@ -78,6 +78,13 @@ func UintX(data []byte) (ret uint64) { return ret } +// Uint8Byte converts a uint8 to a byte slice in BigEndian +func Uint8Byte(data uint8) (ret []byte) { + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, data) + return buf.Bytes() +} + // Uint16Byte converts a uint16 to a byte slice in BigEndian func Uint16Byte(data uint16) (ret []byte) { buf := new(bytes.Buffer) @@ -99,6 +106,13 @@ func Int64Byte(data int64) (ret []byte) { return buf.Bytes() } +// Uint64Byte converts a uint64 to a byte slice in BigEndian +func Uint64Byte(data uint64) (ret []byte) { + buf := new(bytes.Buffer) + binary.Write(buf, binary.BigEndian, data) + return buf.Bytes() +} + 
// Reverse reverses byte slice without allocating new memory func Reverse(data []byte) []byte { n := len(data) diff --git a/convert/convert_test.go b/convert/convert_test.go index 87ec85c..f6be39a 100644 --- a/convert/convert_test.go +++ b/convert/convert_test.go @@ -11,7 +11,11 @@ package convert -import "testing" +import ( + "testing" + + "github.com/stretchr/testify/assert" +) func TestIPByteSlice(t *testing.T) { tests := []struct { @@ -179,3 +183,27 @@ func sliceEq(a []byte, b []byte) bool { } return true } + +func TestUint16Byte(t *testing.T) { + tests := []struct { + name string + input uint16 + expected []byte + }{ + { + name: "Test #1", + input: 23, + expected: []byte{0, 23}, + }, + { + name: "Test #1", + input: 256, + expected: []byte{1, 0}, + }, + } + + for _, test := range tests { + res := Uint16Byte(test.input) + assert.Equal(t, test.expected, res) + } +} diff --git a/database/breakdown.go b/database/breakdown.go new file mode 100644 index 0000000..b433342 --- /dev/null +++ b/database/breakdown.go @@ -0,0 +1,298 @@ +package database + +import ( + "bytes" + "fmt" + "net" + + "github.com/bio-routing/tflow2/avltree" + "github.com/bio-routing/tflow2/iana" + "github.com/bio-routing/tflow2/intfmapper" + "github.com/bio-routing/tflow2/netflow" + "github.com/pkg/errors" + + log "github.com/sirupsen/logrus" +) + +// BreakdownKey is the key used for the brakedown map +type BreakdownKey [FieldMax]string + +// BreakdownMap maps breakdown keys to values +type BreakdownMap map[BreakdownKey]uint64 + +// BreakdownFlags defines by what fields data should be broken down in a query +type BreakdownFlags struct { + Family bool + SrcAddr bool + DstAddr bool + Protocol bool + IntIn bool + IntOut bool + NextHop bool + SrcAsn bool + DstAsn bool + NextHopAsn bool + SrcPfx bool + DstPfx bool + SrcPort bool + DstPort bool + IntInName bool + IntOutName bool +} + +var breakdownLabels = map[int]string{ + FieldFamily: "Family", + FieldSrcAddr: "SrcAddr", + FieldDstAddr: "DstAddr", + 
FieldProtocol: "Protocol", + FieldIntIn: "IntIn", + FieldIntOut: "IntOut", + FieldNextHop: "NextHop", + FieldSrcAs: "SrcAsn", + FieldDstAs: "DstAsn", + FieldNextHopAs: "NextHopAsn", + FieldSrcPfx: "SrcPfx", + FieldDstPfx: "DstPfx", + FieldSrcPort: "SrcPort", + FieldDstPort: "DstPort", + FieldIntInName: "IntInName", + FieldIntOutName: "IntOutName", +} + +// GetBreakdownLabels returns a sorted list of known breakdown labels +func GetBreakdownLabels() []string { + return []string{ + breakdownLabels[FieldFamily], + breakdownLabels[FieldSrcAddr], + breakdownLabels[FieldDstAddr], + breakdownLabels[FieldProtocol], + breakdownLabels[FieldIntIn], + breakdownLabels[FieldIntOut], + breakdownLabels[FieldNextHop], + breakdownLabels[FieldSrcAs], + breakdownLabels[FieldDstAs], + breakdownLabels[FieldNextHopAs], + breakdownLabels[FieldSrcPfx], + breakdownLabels[FieldDstPfx], + breakdownLabels[FieldSrcPort], + breakdownLabels[FieldDstPort], + breakdownLabels[FieldIntInName], + breakdownLabels[FieldIntOutName], + } +} + +// Join formats the keys and joins them with commas +func (bk *BreakdownKey) Join(format string) string { + var buffer bytes.Buffer + for i, value := range bk { + if value == "" { + continue + } + if buffer.Len() > 0 { + buffer.WriteRune(',') + } + buffer.WriteString(fmt.Sprintf(format, breakdownLabels[i], value)) + } + + return buffer.String() +} + +// Set enables the flags in the given list +func (bf *BreakdownFlags) Set(keys []string) error { + for _, key := range keys { + switch key { + case breakdownLabels[FieldFamily]: + bf.Family = true + case breakdownLabels[FieldSrcAddr]: + bf.SrcAddr = true + case breakdownLabels[FieldDstAddr]: + bf.DstAddr = true + case breakdownLabels[FieldProtocol]: + bf.Protocol = true + case breakdownLabels[FieldIntIn]: + bf.IntIn = true + case breakdownLabels[FieldIntOut]: + bf.IntOut = true + case breakdownLabels[FieldNextHop]: + bf.NextHop = true + case breakdownLabels[FieldSrcAs]: + bf.SrcAsn = true + case 
breakdownLabels[FieldDstAs]: + bf.DstAsn = true + case breakdownLabels[FieldNextHopAs]: + bf.NextHopAsn = true + case breakdownLabels[FieldSrcPfx]: + bf.SrcPfx = true + case breakdownLabels[FieldDstPfx]: + bf.DstPfx = true + case breakdownLabels[FieldSrcPort]: + bf.SrcPort = true + case breakdownLabels[FieldDstPort]: + bf.DstPort = true + case breakdownLabels[FieldIntInName]: + bf.IntInName = true + case breakdownLabels[FieldIntOutName]: + bf.IntOutName = true + + default: + return errors.Errorf("invalid breakdown key: %s", key) + } + } + return nil +} + +// Count returns the number of enabled breakdown flags +func (bf *BreakdownFlags) Count() (count int) { + + if bf.Family { + count++ + } + if bf.SrcAddr { + count++ + } + if bf.DstAddr { + count++ + } + if bf.Protocol { + count++ + } + if bf.IntIn { + count++ + } + if bf.IntOut { + count++ + } + if bf.NextHop { + count++ + } + if bf.SrcAsn { + count++ + } + if bf.DstAsn { + count++ + } + if bf.NextHopAsn { + count++ + } + if bf.SrcPfx { + count++ + } + if bf.DstPfx { + count++ + } + if bf.SrcPort { + count++ + } + if bf.DstPort { + count++ + } + if bf.IntInName { + count++ + } + if bf.IntOutName { + count++ + } + + return +} + +// breakdown build all possible relevant keys of flows for flows in tree `node` +// and builds sums for each key in order to allow us to find top combinations +func breakdown(node *avltree.TreeNode, vals ...interface{}) { + if len(vals) != 5 { + log.Errorf("lacking arguments") + return + } + + intfMap := vals[0].(intfmapper.InterfaceNameByID) + iana := vals[1].(*iana.IANA) + bd := vals[2].(BreakdownFlags) + sums := vals[3].(*concurrentResSum) + buckets := vals[4].(BreakdownMap) + + for _, flow := range node.Values { + fl := flow.(*netflow.Flow) + + key := BreakdownKey{} + + if bd.Family { + key[FieldFamily] = fmt.Sprintf("%d", fl.Family) + } + if bd.SrcAddr { + key[FieldSrcAddr] = net.IP(fl.SrcAddr).String() + } + if bd.DstAddr { + key[FieldDstAddr] = net.IP(fl.DstAddr).String() + } + if 
bd.Protocol { + protoMap := iana.GetIPProtocolsByID() + if _, ok := protoMap[uint8(fl.Protocol)]; ok { + key[FieldProtocol] = fmt.Sprintf("%s", protoMap[uint8(fl.Protocol)]) + } else { + key[FieldProtocol] = fmt.Sprintf("%d", fl.Protocol) + } + } + if bd.IntIn { + key[FieldIntIn] = fmt.Sprintf("%d", fl.IntIn) + } + if bd.IntOut { + key[FieldIntOut] = fmt.Sprintf("%d", fl.IntOut) + } + if bd.IntInName { + if _, ok := intfMap[uint16(fl.IntIn)]; ok { + name := intfMap[uint16(fl.IntIn)] + key[FieldIntIn] = fmt.Sprintf("%s", name) + } else { + key[FieldIntIn] = fmt.Sprintf("%d", fl.IntIn) + } + } + if bd.IntOutName { + if _, ok := intfMap[uint16(fl.IntOut)]; ok { + name := intfMap[uint16(fl.IntOut)] + key[FieldIntOut] = fmt.Sprintf("%s", name) + } else { + key[FieldIntOut] = fmt.Sprintf("%d", fl.IntIn) + } + } + if bd.NextHop { + key[FieldNextHop] = net.IP(fl.NextHop).String() + } + if bd.SrcAsn { + key[FieldSrcAs] = fmt.Sprintf("%d", fl.SrcAs) + } + if bd.DstAsn { + key[FieldDstAs] = fmt.Sprintf("%d", fl.DstAs) + } + if bd.NextHopAsn { + key[FieldNextHopAs] = fmt.Sprintf("%d", fl.NextHopAs) + } + if bd.SrcPfx { + if fl.SrcPfx != nil { + key[FieldSrcPfx] = fl.SrcPfx.ToIPNet().String() + } else { + key[FieldSrcPfx] = "0.0.0.0/0" + } + } + if bd.DstPfx { + if fl.DstPfx != nil { + key[FieldDstPfx] = fl.DstPfx.ToIPNet().String() + } else { + key[FieldDstPfx] = "0.0.0.0/0" + } + } + if bd.SrcPort { + key[FieldSrcPort] = fmt.Sprintf("%d", fl.SrcPort) + } + if bd.DstPort { + key[FieldDstPort] = fmt.Sprintf("%d", fl.DstPort) + } + + // Build sum for key + buckets[key] += fl.Size * fl.Samplerate + + // Build overall sum + sums.Lock.Lock() + sums.Values[key] += fl.Size * fl.Samplerate + sums.Lock.Unlock() + } +} diff --git a/database/breakdown_test.go b/database/breakdown_test.go new file mode 100644 index 0000000..7494a20 --- /dev/null +++ b/database/breakdown_test.go @@ -0,0 +1,71 @@ +package database + +import ( + "strconv" + "testing" + + "github.com/stretchr/testify/assert" 
+) + +func TestBreakdownKeyString(t *testing.T) { + assert := assert.New(t) + + // Empty Key + key := BreakdownKey{} + assert.Equal("", key.Join("%s:%s")) + + // Set one key + key.set("DstPort", "23") + assert.Equal(key.get("DstPort"), "23") + assert.Equal("DstPort:23", key.Join("%s:%s")) + + // Set all keys + for i := range breakdownLabels { + key[i] = strconv.Itoa(i) + } + assert.Equal("Family:2,SrcAddr:3,DstAddr:4,Protocol:5,IntIn:6,IntOut:7,NextHop:8,SrcAsn:9,DstAsn:10,NextHopAsn:11,SrcPfx:12,DstPfx:13,SrcPort:14,DstPort:15,IntInName:16,IntOutName:17", key.Join("%s:%s")) +} + +func TestBreakdownFlags(t *testing.T) { + assert := assert.New(t) + + // Defaults + key := BreakdownFlags{} + assert.False(key.DstAddr) + + // Enable all + assert.NoError(key.Set([]string{"Family", "SrcAddr", "DstAddr", "Protocol", "IntIn", "IntOut", "NextHop", "SrcAsn", "DstAsn", "NextHopAsn", "SrcPfx", "DstPfx", "SrcPort", "DstPort"})) + assert.True(key.DstAddr) + assert.Equal(14, key.Count()) + + // Invalid key + assert.EqualError(key.Set([]string{"foobar"}), "invalid breakdown key: foobar") +} + +func TestGetBreakdownLabels(t *testing.T) { + assert := assert.New(t) + + labels := GetBreakdownLabels() + assert.NotNil(labels) + assert.Contains(labels, "SrcAddr") +} + +// reverse mapping for breakdownLabels +func breakdownIndex(key string) int { + for i, k := range breakdownLabels { + if k == key { + return i + } + } + panic("invalid breakdown label: " + key) +} + +// set Sets the value of a field +func (bk *BreakdownKey) set(key string, value string) { + bk[breakdownIndex(key)] = value +} + +// get returns the value of a field +func (bk *BreakdownKey) get(key string) string { + return bk[breakdownIndex(key)] +} diff --git a/database/database.go b/database/database.go index a2916e0..01f54f9 100644 --- a/database/database.go +++ b/database/database.go @@ -22,82 +22,54 @@ import ( "time" "unsafe" - "github.com/golang/glog" + "github.com/bio-routing/tflow2/avltree" + 
"github.com/bio-routing/tflow2/iana" + "github.com/bio-routing/tflow2/intfmapper" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/nfserver" "github.com/golang/protobuf/proto" - "github.com/google/tflow2/avltree" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/nfserver" -) - -// TimeGroup groups all indices to flows of a particular router at a particular -// time into one object -type TimeGroup struct { - Any map[int]*avltree.Tree // Workaround: Why a map? Because: cannot assign to flows[fl.Timestamp][rtr].Any - SrcAddr map[string]*avltree.Tree - DstAddr map[string]*avltree.Tree - Protocol map[uint32]*avltree.Tree - IntIn map[uint32]*avltree.Tree - IntOut map[uint32]*avltree.Tree - NextHop map[string]*avltree.Tree - SrcAs map[uint32]*avltree.Tree - DstAs map[uint32]*avltree.Tree - NextHopAs map[uint32]*avltree.Tree - SrcPfx map[string]*avltree.Tree - DstPfx map[string]*avltree.Tree - SrcPort map[uint32]*avltree.Tree - DstPort map[uint32]*avltree.Tree - Locks *LockGroup -} -// LockGroup is a group of locks suitable to lock any particular member of TimeGroup -type LockGroup struct { - Any sync.RWMutex - SrcAddr sync.RWMutex - DstAddr sync.RWMutex - Protocol sync.RWMutex - IntIn sync.RWMutex - IntOut sync.RWMutex - NextHop sync.RWMutex - SrcAs sync.RWMutex - DstAs sync.RWMutex - NextHopAs sync.RWMutex - SrcPfx sync.RWMutex - DstPfx sync.RWMutex - SrcPort sync.RWMutex - DstPort sync.RWMutex -} + log "github.com/sirupsen/logrus" +) // FlowsByTimeRtr holds all keys (and thus is the only way) to our flows -type FlowsByTimeRtr map[int64]map[string]TimeGroup +type FlowsByTimeRtr map[int64]map[string]*TimeGroup // FlowDatabase represents a flow database object type FlowDatabase struct { - flows FlowsByTimeRtr - lock sync.RWMutex - maxAge int64 - aggregation int64 - lastDump int64 - compLevel int - samplerate int - storage string - debug int - anonymize bool - Input chan *netflow.Flow + flows FlowsByTimeRtr + lock sync.RWMutex + 
maxAge int64 + aggregation int64 + lastDump int64 + compLevel int + samplerate int + storage string + debug int + anonymize bool + Input chan *netflow.Flow + intfMapper intfmapper.IntfMapperInterface + agentsNameByIP map[string]string + iana *iana.IANA } +const anyIndex = uint8(0) + // New creates a new FlowDatabase and returns a pointer to it -func New(aggregation int64, maxAge int64, numAddWorker int, samplerate int, debug int, compLevel int, storage string, anonymize bool) *FlowDatabase { +func New(aggregation int64, maxAge int64, numAddWorker int, debug int, compLevel int, storage string, anonymize bool, intfMapper intfmapper.IntfMapperInterface, agentsNameByIP map[string]string, iana *iana.IANA) *FlowDatabase { flowDB := &FlowDatabase{ - maxAge: maxAge, - aggregation: aggregation, - compLevel: compLevel, - samplerate: samplerate, - Input: make(chan *netflow.Flow), - lastDump: time.Now().Unix(), - storage: storage, - debug: debug, - flows: make(FlowsByTimeRtr), - anonymize: anonymize, + maxAge: maxAge, + aggregation: aggregation, + compLevel: compLevel, + Input: make(chan *netflow.Flow), + lastDump: time.Now().Unix(), + storage: storage, + debug: debug, + flows: make(FlowsByTimeRtr), + anonymize: anonymize, + intfMapper: intfMapper, + agentsNameByIP: agentsNameByIP, + iana: iana, } for i := 0; i < numAddWorker; i++ { @@ -117,163 +89,108 @@ func New(aggregation int64, maxAge int64, numAddWorker int, samplerate int, debu } }() - go func() { - for { - // Set a timer and wait for our next run - event := time.NewTimer(time.Duration(flowDB.aggregation) * time.Second) - <-event.C - flowDB.Dumper() - } - }() + if flowDB.storage != "" { + go func() { + for { + // Set a timer and wait for our next run + event := time.NewTimer(time.Duration(flowDB.aggregation) * time.Second) + <-event.C + flowDB.Dumper() + } + }() + } } return flowDB } -// Add adds flow `fl` to database fdb -func (fdb *FlowDatabase) Add(fl *netflow.Flow) { - // build indices for map access - rtrip := 
net.IP(fl.Router) - rtr := rtrip.String() - srcAddr := net.IP(fl.SrcAddr).String() - dstAddr := net.IP(fl.DstAddr).String() - nextHopAddr := net.IP(fl.NextHop).String() - srcPfx := fl.SrcPfx.String() - dstPfx := fl.DstPfx.String() - +func (fdb *FlowDatabase) getTimeGroup(fl *netflow.Flow, rtr string) *TimeGroup { fdb.lock.Lock() + defer fdb.lock.Unlock() + // Check if timestamp entry exists already. If not, create it. - if _, ok := fdb.flows[fl.Timestamp]; !ok { - fdb.flows[fl.Timestamp] = make(map[string]TimeGroup) + flows, ok := fdb.flows[fl.Timestamp] + if !ok { + flows = make(map[string]*TimeGroup) + fdb.flows[fl.Timestamp] = flows } // Check if router entry exists already. If not, create it. - if _, ok := fdb.flows[fl.Timestamp][rtr]; !ok { - fdb.flows[fl.Timestamp][rtr] = TimeGroup{ - Any: make(map[int]*avltree.Tree), - SrcAddr: make(map[string]*avltree.Tree), - DstAddr: make(map[string]*avltree.Tree), - Protocol: make(map[uint32]*avltree.Tree), - IntIn: make(map[uint32]*avltree.Tree), - IntOut: make(map[uint32]*avltree.Tree), - NextHop: make(map[string]*avltree.Tree), - SrcAs: make(map[uint32]*avltree.Tree), - DstAs: make(map[uint32]*avltree.Tree), - NextHopAs: make(map[uint32]*avltree.Tree), - SrcPfx: make(map[string]*avltree.Tree), - DstPfx: make(map[string]*avltree.Tree), - SrcPort: make(map[uint32]*avltree.Tree), - DstPort: make(map[uint32]*avltree.Tree), - Locks: &LockGroup{}, + timeGroup, ok := flows[rtr] + if !ok { + timeGroup = &TimeGroup{ + Any: newMapTree(), + SrcAddr: newMapTree(), + DstAddr: newMapTree(), + Protocol: newMapTree(), + IntIn: newMapTree(), + IntOut: newMapTree(), + NextHop: newMapTree(), + SrcAs: newMapTree(), + DstAs: newMapTree(), + NextHopAs: newMapTree(), + SrcPfx: newMapTree(), + DstPfx: newMapTree(), + SrcPort: newMapTree(), + DstPort: newMapTree(), + InterfaceIDByName: fdb.intfMapper.GetInterfaceIDByName(rtr), } + flows[rtr] = timeGroup } - fdb.lock.Unlock() - - fdb.lock.RLock() - defer fdb.lock.RUnlock() - if _, ok := 
fdb.flows[fl.Timestamp]; !ok { - glog.Warningf("stopped adding data for %d: already deleted", fl.Timestamp) - return - } - - locks := fdb.flows[fl.Timestamp][rtr].Locks - - // Start the actual insertion into indices - locks.Any.Lock() - if fdb.flows[fl.Timestamp][rtr].Any[0] == nil { - fdb.flows[fl.Timestamp][rtr].Any[0] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].Any[0].Insert(fl, fl, ptrIsSmaller) - locks.Any.Unlock() - - locks.SrcAddr.Lock() - if fdb.flows[fl.Timestamp][rtr].SrcAddr[srcAddr] == nil { - fdb.flows[fl.Timestamp][rtr].SrcAddr[srcAddr] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].SrcAddr[srcAddr].Insert(fl, fl, ptrIsSmaller) - locks.SrcAddr.Unlock() - - locks.DstAddr.Lock() - if fdb.flows[fl.Timestamp][rtr].DstAddr[dstAddr] == nil { - fdb.flows[fl.Timestamp][rtr].DstAddr[dstAddr] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].DstAddr[dstAddr].Insert(fl, fl, ptrIsSmaller) - locks.DstAddr.Unlock() - - locks.Protocol.Lock() - if fdb.flows[fl.Timestamp][rtr].Protocol[fl.Protocol] == nil { - fdb.flows[fl.Timestamp][rtr].Protocol[fl.Protocol] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].Protocol[fl.Protocol].Insert(fl, fl, ptrIsSmaller) - locks.Protocol.Unlock() - - locks.IntIn.Lock() - if fdb.flows[fl.Timestamp][rtr].IntIn[fl.IntIn] == nil { - fdb.flows[fl.Timestamp][rtr].IntIn[fl.IntIn] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].IntIn[fl.IntIn].Insert(fl, fl, ptrIsSmaller) - locks.IntIn.Unlock() - locks.IntOut.Lock() - if fdb.flows[fl.Timestamp][rtr].IntOut[fl.IntOut] == nil { - fdb.flows[fl.Timestamp][rtr].IntOut[fl.IntOut] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].IntOut[fl.IntOut].Insert(fl, fl, ptrIsSmaller) - locks.IntOut.Unlock() + return timeGroup +} - locks.NextHop.Lock() - if fdb.flows[fl.Timestamp][rtr].NextHop[nextHopAddr] == nil { - fdb.flows[fl.Timestamp][rtr].NextHop[nextHopAddr] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].NextHop[nextHopAddr].Insert(fl, fl, ptrIsSmaller) - 
locks.NextHop.Unlock() +// Add adds flow `fl` to database fdb +func (fdb *FlowDatabase) Add(fl *netflow.Flow) { + // build indices for map access + rtrip := net.IP(fl.Router) - locks.SrcAs.Lock() - if fdb.flows[fl.Timestamp][rtr].SrcAs[fl.SrcAs] == nil { - fdb.flows[fl.Timestamp][rtr].SrcAs[fl.SrcAs] = avltree.New() + if _, ok := fdb.agentsNameByIP[rtrip.String()]; !ok { + log.Warningf("Unknown flow source: %s", rtrip.String()) + return } - fdb.flows[fl.Timestamp][rtr].SrcAs[fl.SrcAs].Insert(fl, fl, ptrIsSmaller) - locks.SrcAs.Unlock() - locks.DstAs.Lock() - if fdb.flows[fl.Timestamp][rtr].DstAs[fl.DstAs] == nil { - fdb.flows[fl.Timestamp][rtr].DstAs[fl.DstAs] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].DstAs[fl.DstAs].Insert(fl, fl, ptrIsSmaller) - locks.DstAs.Unlock() + rtrName := fdb.agentsNameByIP[rtrip.String()] + timeGroup := fdb.getTimeGroup(fl, rtrName) - locks.NextHopAs.Lock() - if fdb.flows[fl.Timestamp][rtr].NextHopAs[fl.NextHopAs] == nil { - fdb.flows[fl.Timestamp][rtr].NextHopAs[fl.NextHopAs] = avltree.New() + fdb.lock.RLock() + defer fdb.lock.RUnlock() + if _, ok := fdb.flows[fl.Timestamp]; !ok { + log.Warningf("stopped adding data for %d: already deleted", fl.Timestamp) + return } - fdb.flows[fl.Timestamp][rtr].NextHopAs[fl.NextHopAs].Insert(fl, fl, ptrIsSmaller) - locks.NextHopAs.Unlock() - locks.SrcPfx.Lock() - if fdb.flows[fl.Timestamp][rtr].SrcPfx[srcPfx] == nil { - fdb.flows[fl.Timestamp][rtr].SrcPfx[srcPfx] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].SrcPfx[srcPfx].Insert(fl, fl, ptrIsSmaller) - locks.SrcPfx.Unlock() + // Insert into indices + timeGroup.Any.Insert(anyIndex, fl) + timeGroup.SrcAddr.Insert(net.IP(fl.SrcAddr), fl) + timeGroup.DstAddr.Insert(net.IP(fl.DstAddr), fl) + timeGroup.Protocol.Insert(byte(fl.Protocol), fl) + timeGroup.IntIn.Insert(uint16(fl.IntIn), fl) + timeGroup.IntOut.Insert(uint16(fl.IntOut), fl) + timeGroup.NextHop.Insert(net.IP(fl.NextHop), fl) + timeGroup.SrcAs.Insert(fl.SrcAs, fl) + 
timeGroup.DstAs.Insert(fl.DstAs, fl) + timeGroup.NextHopAs.Insert(fl.NextHopAs, fl) + timeGroup.SrcPfx.Insert(fl.SrcPfx.String(), fl) + timeGroup.DstPfx.Insert(fl.DstPfx.String(), fl) + timeGroup.SrcPort.Insert(fl.SrcPort, fl) + timeGroup.DstPort.Insert(fl.DstPort, fl) +} - locks.DstPfx.Lock() - if fdb.flows[fl.Timestamp][rtr].DstPfx[dstPfx] == nil { - fdb.flows[fl.Timestamp][rtr].DstPfx[dstPfx] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].DstPfx[dstPfx].Insert(fl, fl, ptrIsSmaller) - locks.DstPfx.Unlock() +// CurrentTimeslot returns the beginning of the current timeslot +func (fdb *FlowDatabase) CurrentTimeslot() int64 { + now := time.Now().Unix() + return now - now%fdb.aggregation +} - locks.SrcPort.Lock() - if fdb.flows[fl.Timestamp][rtr].SrcPort[fl.SrcPort] == nil { - fdb.flows[fl.Timestamp][rtr].SrcPort[fl.SrcPort] = avltree.New() - } - fdb.flows[fl.Timestamp][rtr].SrcPort[fl.SrcPort].Insert(fl, fl, ptrIsSmaller) - locks.SrcPort.Unlock() +// AggregationPeriod returns the configured aggregation period +func (fdb *FlowDatabase) AggregationPeriod() int64 { + return fdb.aggregation } // CleanUp deletes all flows from database `fdb` that are older than `maxAge` seconds func (fdb *FlowDatabase) CleanUp() { - now := time.Now().Unix() - now = now - now%fdb.aggregation + now := fdb.CurrentTimeslot() fdb.lock.Lock() defer fdb.lock.Unlock() @@ -290,8 +207,7 @@ func (fdb *FlowDatabase) Dumper() { defer fdb.lock.RUnlock() min := atomic.LoadInt64(&fdb.lastDump) - now := time.Now().Unix() - max := (now - now%fdb.aggregation) - 2*fdb.aggregation + max := fdb.CurrentTimeslot() - 2*fdb.aggregation atomic.StoreInt64(&fdb.lastDump, max) for ts := range fdb.flows { @@ -306,59 +222,83 @@ func (fdb *FlowDatabase) Dumper() { } func (fdb *FlowDatabase) dumpToDisk(ts int64, router string) { + if fdb.storage == "" { + return + } + fdb.lock.RLock() - tree := fdb.flows[ts][router].Any[0] + tg := fdb.flows[ts][router] + tree := fdb.flows[ts][router].Any.Get(anyIndex) 
fdb.lock.RUnlock() + // Create flow proto buffer flows := &netflow.Flows{} + // Populate interface mapping + for name, id := range tg.InterfaceIDByName { + flows.InterfaceMapping = append(flows.InterfaceMapping, &netflow.Intf{ + Id: uint32(id), + Name: name, + }) + } + + // Write flows into `flows` proto buffer tree.Each(dump, fdb.anonymize, flows) if fdb.debug > 1 { - glog.Warningf("flows contains %d flows", len(flows.Flows)) + log.Warningf("flows contains %d flows", len(flows.Flows)) } + + // Marshal flows into proto buffer buffer, err := proto.Marshal(flows) if err != nil { - glog.Errorf("unable to marshal flows into pb: %v", err) + log.Errorf("unable to marshal flows into pb: %v", err) return } + // Create dir if doesn't exist ymd := fmt.Sprintf("%04d-%02d-%02d", time.Unix(ts, 0).Year(), time.Unix(ts, 0).Month(), time.Unix(ts, 0).Day()) os.Mkdir(fmt.Sprintf("%s/%s", fdb.storage, ymd), 0700) + // Create file fh, err := os.Create(fmt.Sprintf("%s/%s/nf-%d-%s.tflow2.pb.gzip", fdb.storage, ymd, ts, router)) if err != nil { - glog.Errorf("couldn't create file: %v", err) + log.Errorf("couldn't create file: %v", err) } defer fh.Close() // Compress data before writing it out to the disk gz, err := gzip.NewWriterLevel(fh, fdb.compLevel) if err != nil { - glog.Errorf("invalud gzip compression level: %v", err) + log.Errorf("invalud gzip compression level: %v", err) return } + + // Compress and write file _, err = gz.Write(buffer) gz.Close() if err != nil { - glog.Errorf("failed to write file: %v", err) + log.Errorf("failed to write file: %v", err) } } func dump(node *avltree.TreeNode, vals ...interface{}) { anonymize := vals[0].(bool) flows := vals[1].(*netflow.Flows) - flow := node.Value.(*netflow.Flow) - flowcopy := *flow - if anonymize { - // Remove information about particular IP addresses for privacy reason - flowcopy.SrcAddr = []byte{0, 0, 0, 0} - flowcopy.DstAddr = []byte{0, 0, 0, 0} - } + for _, f := range node.Values { + flow := f.(*netflow.Flow) + flowcopy := 
*flow - flows.Flows = append(flows.Flows, &flowcopy) + if anonymize { + // Remove information about particular IP addresses for privacy reason + flowcopy.SrcAddr = []byte{0, 0, 0, 0} + flowcopy.DstAddr = []byte{0, 0, 0, 0} + } + + flows.Flows = append(flows.Flows, &flowcopy) + } } // ptrIsSmaller checks if uintptr c1 is smaller than uintptr c2 @@ -386,6 +326,7 @@ func dumpFlows(tree *avltree.TreeNode) { // printNode dumps the flow of `node` on the screen func printNode(node *avltree.TreeNode, vals ...interface{}) { - fl := node.Value.(*netflow.Flow) - nfserver.Dump(fl) + for _, fl := range node.Values { + nfserver.Dump(fl.(*netflow.Flow)) + } } diff --git a/database/database_query.go b/database/database_query.go index e7e9e6b..68f3a63 100644 --- a/database/database_query.go +++ b/database/database_query.go @@ -13,43 +13,78 @@ package database import ( "compress/gzip" - "encoding/json" "fmt" "io/ioutil" "net" "os" - "strconv" - "strings" "sync" "time" - "github.com/golang/glog" + "github.com/bio-routing/tflow2/avltree" + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/intfmapper" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/stats" "github.com/golang/protobuf/proto" - "github.com/google/tflow2/avltree" - "github.com/google/tflow2/convert" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/stats" + "github.com/pkg/errors" + + log "github.com/sirupsen/logrus" +) + +// These constants are used in communication with the frontend +const ( + OpEqual = 0 + OpUnequal = 1 + OpSmaller = 2 + OpGreater = 3 ) -// BreakDownMap defines by what fields data should be broken down in a query -type BreakDownMap struct { - Router bool - Family bool - SrcAddr bool - DstAddr bool - Protocol bool - IntIn bool - IntOut bool - NextHop bool - SrcAsn bool - DstAsn bool - NextHopAsn bool - SrcPfx bool - DstPfx bool - SrcPort bool - DstPort bool +// These constants are only used internally +const ( + FieldTimestamp = iota + 
FieldAgent + FieldFamily + FieldSrcAddr + FieldDstAddr + FieldProtocol + FieldIntIn + FieldIntOut + FieldNextHop + FieldSrcAs + FieldDstAs + FieldNextHopAs + FieldSrcPfx + FieldDstPfx + FieldSrcPort + FieldDstPort + FieldIntInName + FieldIntOutName + FieldMax +) + +var fieldNames = map[string]int{ + "Timestamp": FieldTimestamp, + "Agent": FieldAgent, + "Family": FieldFamily, + "SrcAddr": FieldSrcAddr, + "DstAddr": FieldDstAddr, + "Protocol": FieldProtocol, + "IntIn": FieldIntIn, + "IntOut": FieldIntOut, + "NextHop": FieldNextHop, + "SrcAs": FieldSrcAs, + "DstAs": FieldDstAs, + "NextHopAs": FieldNextHopAs, + "SrcPfx": FieldSrcPfx, + "DstPfx": FieldDstPfx, + "SrcPort": FieldSrcPort, + "DstPort": FieldDstPort, + "IntInName": FieldIntInName, + "IntOutName": FieldIntOutName, } +type void struct{} + // Condition represents a query condition type Condition struct { Field int @@ -57,237 +92,116 @@ type Condition struct { Operand []byte } -// ConditionExt is external representation of a query condition -type ConditionExt struct { - Field int - Operator int - Operand string -} - // Conditions represents a set of conditions of a query type Conditions []Condition -// ConditionsExt is external representation of conditions of a query -type ConditionsExt []ConditionExt - -// QueryExt represents a query in the way it is received from the frontend -type QueryExt struct { - Cond ConditionsExt - Breakdown BreakDownMap - TopN int -} - // Query is the internal representation of a query type Query struct { Cond Conditions - Breakdown BreakDownMap + Breakdown BreakdownFlags TopN int } type concurrentResSum struct { - Values map[string]uint64 + Values BreakdownMap Lock sync.Mutex } -// These constants are used in communication with the frontend -const ( - OpEqual = 0 - OpUnequal = 1 - OpSmaller = 2 - OpGreater = 3 - FieldTimestamp = 0 - FieldRouter = 1 - FieldSrcAddr = 2 - FieldDstAddr = 3 - FieldProtocol = 4 - FieldIntIn = 5 - FieldIntOut = 6 - FieldNextHop = 7 - FieldSrcAs = 8 - 
FieldDstAs = 9 - FieldNextHopAs = 10 - FieldSrcPfx = 11 - FieldDstPfx = 12 - FieldSrcPort = 13 - FieldDstPort = 14 -) - -// translateQuery translates a query from external representation to internal representaion -func translateQuery(e QueryExt) (Query, error) { - var q Query - q.Breakdown = e.Breakdown - q.TopN = e.TopN - - for _, c := range e.Cond { - var operand []byte - - switch c.Field { - case FieldTimestamp: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Int64Byte(int64(op)) - - case FieldProtocol: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint16Byte(uint16(op)) - - case FieldSrcPort: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint16Byte(uint16(op)) - - case FieldDstPort: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint16Byte(uint16(op)) - - case FieldSrcAddr: - operand = convert.IPByteSlice(c.Operand) - - case FieldDstAddr: - operand = convert.IPByteSlice(c.Operand) - - case FieldRouter: - operand = convert.IPByteSlice(c.Operand) - - case FieldIntIn: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint16Byte(uint16(op)) - - case FieldIntOut: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint16Byte(uint16(op)) - - case FieldNextHop: - operand = convert.IPByteSlice(c.Operand) - - case FieldSrcAs: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint32Byte(uint32(op)) - - case FieldDstAs: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint32Byte(uint32(op)) - - case FieldNextHopAs: - op, err := strconv.Atoi(c.Operand) - if err != nil { - return q, err - } - operand = convert.Uint32Byte(uint32(op)) - - case FieldSrcPfx: - _, pfx, err := net.ParseCIDR(string(c.Operand)) - if 
err != nil { - return q, err - } - operand = []byte(pfx.String()) +// GetFieldByName returns the internal number of a field +func GetFieldByName(name string) int { + if i, found := fieldNames[name]; found { + return i + } + return -1 +} - case FieldDstPfx: - _, pfx, err := net.ParseCIDR(string(c.Operand)) - if err != nil { - return q, err - } - operand = []byte(pfx.String()) +// Includes checks if the given field and operator is included in the list +func (conditions Conditions) Includes(field int, operator int) bool { + for _, cond := range conditions { + if cond.Field == field && cond.Operator == operator { + return true } - - q.Cond = append(q.Cond, Condition{ - Field: c.Field, - Operator: c.Operator, - Operand: operand, - }) } - - return q, nil + return false } // loadFromDisc loads netflow data from disk into in memory data structure -func (fdb *FlowDatabase) loadFromDisc(ts int64, router string, query Query, ch chan map[string]uint64, resSum *concurrentResSum) { +func (fdb *FlowDatabase) loadFromDisc(ts int64, agent string, query Query, resSum *concurrentResSum) (BreakdownMap, error) { + if fdb.storage == "" { + return nil, errors.Errorf("Disk storage is disabled") + } + res := avltree.New() ymd := fmt.Sprintf("%04d-%02d-%02d", time.Unix(ts, 0).Year(), time.Unix(ts, 0).Month(), time.Unix(ts, 0).Day()) - filename := fmt.Sprintf("%s/%s/nf-%d-%s.tflow2.pb.gzip", fdb.storage, ymd, ts, router) + filename := fmt.Sprintf("%s/%s/nf-%d-%s.tflow2.pb.gzip", fdb.storage, ymd, ts, agent) fh, err := os.Open(filename) if err != nil { if fdb.debug > 0 { - glog.Errorf("unable to open file: %v", err) + log.Errorf("unable to open file: %v", err) } - ch <- nil - return + return nil, err } if fdb.debug > 1 { - glog.Infof("sucessfully opened file: %s", filename) + log.Infof("successfully opened file: %s", filename) } defer fh.Close() gz, err := gzip.NewReader(fh) if err != nil { - glog.Errorf("unable to create gzip reader: %v", err) - ch <- nil - return + log.Errorf("unable to 
create gzip reader: %v", err) + return nil, err } defer gz.Close() buffer, err := ioutil.ReadAll(gz) if err != nil { - glog.Errorf("unable to gunzip: %v", err) - ch <- nil - return + log.Errorf("unable to gunzip: %v", err) + return nil, err } // Unmarshal protobuf flows := netflow.Flows{} err = proto.Unmarshal(buffer, &flows) if err != nil { - glog.Errorf("unable to unmarshal protobuf: %v", err) - ch <- nil - return + log.Errorf("unable to unmarshal protobuf: %v", err) + return nil, err + } + + // Create interface mapping + interfaceIDByName := make(intfmapper.InterfaceIDByName) + for _, m := range flows.InterfaceMapping { + interfaceIDByName[m.Name] = uint16(m.Id) } if fdb.debug > 1 { - glog.Infof("file %s contains %d flows", filename, len(flows.Flows)) + log.Infof("file %s contains %d flows", filename, len(flows.Flows)) } // Validate flows and add them to res tree for _, fl := range flows.Flows { - if validateFlow(fl, query) { + if validateFlow(fl, query, interfaceIDByName) { res.Insert(fl, fl, ptrIsSmaller) } } // Breakdown - resTime := make(map[string]uint64) - res.Each(breakdown, query.Breakdown, resSum, resTime) + resTime := make(BreakdownMap) + res.Each(breakdown, fdb.intfMapper.GetInterfaceNameByID(agent), fdb.iana, query.Breakdown, resSum, resTime) - ch <- resTime + return resTime, err } -func validateFlow(fl *netflow.Flow, query Query) bool { +func validateFlow(fl *netflow.Flow, query Query, interfaceIDByName intfmapper.InterfaceIDByName) bool { for _, c := range query.Cond { switch c.Field { case FieldTimestamp: continue - case FieldRouter: + case FieldAgent: + continue + case FieldFamily: + if fl.Family != uint32(convert.Uint16b(c.Operand)) { + return false + } continue case FieldProtocol: if fl.Protocol != uint32(convert.Uint16b(c.Operand)) { @@ -295,12 +209,12 @@ func validateFlow(fl *netflow.Flow, query Query) bool { } continue case FieldSrcAddr: - if net.IP(fl.SrcAddr).String() != net.IP(c.Operand).String() { + if 
!net.IP(fl.SrcAddr).Equal(net.IP(c.Operand)) { return false } continue case FieldDstAddr: - if net.IP(fl.DstAddr).String() != net.IP(c.Operand).String() { + if !net.IP(fl.DstAddr).Equal(net.IP(c.Operand)) { return false } continue @@ -315,7 +229,7 @@ func validateFlow(fl *netflow.Flow, query Query) bool { } continue case FieldNextHop: - if net.IP(fl.NextHop).String() != net.IP(c.Operand).String() { + if !net.IP(fl.NextHop).Equal(net.IP(c.Operand)) { return false } continue @@ -353,44 +267,40 @@ func validateFlow(fl *netflow.Flow, query Query) bool { return false } continue + case FieldIntInName: + id := interfaceIDByName[string(c.Operand)] + if uint16(fl.IntIn) != id { + return false + } + continue + case FieldIntOutName: + id := interfaceIDByName[string(c.Operand)] + if uint16(fl.IntOut) != id { + return false + } + continue } } return true } -// RunQuery executes a query and returns sends the result as JSON on `w` -func (fdb *FlowDatabase) RunQuery(query string) ([][]string, error) { - queryStart := time.Now() - stats.GlobalStats.Queries++ - var qe QueryExt - err := json.Unmarshal([]byte(query), &qe) - if err != nil { - glog.Warningf("Unable unmarshal json query: %s", query) - return nil, err - } - q, err := translateQuery(qe) - if err != nil { - glog.Warningf("Unable to translate query") - return nil, err - } - - // Determine router +func (fdb *FlowDatabase) getAgent(q *Query) (string, error) { rtr := "" for _, c := range q.Cond { - if c.Field == FieldRouter { - iprtr := net.IP(c.Operand) - rtr = iprtr.String() + if c.Field == FieldAgent { + rtr = string(c.Operand) } } if rtr == "" { - glog.Warningf("Router is mandatory cirteria") - return nil, err + log.Warningf("Agent is mandatory cirteria") + return "", errors.Errorf("Agent criteria not found") } - var start int64 - end := time.Now().Unix() + return rtr, nil +} - // Determine time window +func (fdb *FlowDatabase) getStartEndTimes(q *Query) (start int64, end int64, err error) { + end = time.Now().Unix() for _, 
c := range q.Cond { if c.Field != FieldTimestamp { continue @@ -400,285 +310,121 @@ func (fdb *FlowDatabase) RunQuery(query string) ([][]string, error) { start = int64(convert.Uint64b(c.Operand)) case OpSmaller: end = int64(convert.Uint64b(c.Operand)) + case OpEqual: + start = int64(convert.Uint64b(c.Operand)) + end = start } } - // Allign start point to `aggregation` raster + // Align start point to `aggregation` raster start = start - (start % fdb.aggregation) - resSum := &concurrentResSum{} - resSum.Values = make(map[string]uint64) - resTime := make(map[int64]map[string]uint64) - resChannels := make(map[int64]chan map[string]uint64) - - for ts := start; ts < end; ts += fdb.aggregation { - resChannels[ts] = make(chan map[string]uint64) - fdb.lock.RLock() - if _, ok := fdb.flows[ts]; !ok { - fdb.lock.RUnlock() - go fdb.loadFromDisc(ts, rtr, q, resChannels[ts], resSum) - fdb.lock.RLock() - if _, ok := fdb.flows[ts]; !ok { - fdb.lock.RUnlock() - continue - } - } - - // candidates keeps a list of all trees that fulfill the queries criteria - candidates := make([]*avltree.Tree, 0) - for _, c := range q.Cond { - if fdb.debug > 1 { - glog.Infof("Adding tree to cancidates list: Field: %d, Value: %d", c.Field, c.Operand) - } - switch c.Field { - case FieldTimestamp: - continue - case FieldRouter: - continue - case FieldProtocol: - candidates = append(candidates, fdb.flows[ts][rtr].Protocol[uint32(convert.Uint16b(c.Operand))]) - case FieldSrcAddr: - candidates = append(candidates, fdb.flows[ts][rtr].SrcAddr[net.IP(c.Operand).String()]) - case FieldDstAddr: - candidates = append(candidates, fdb.flows[ts][rtr].DstAddr[net.IP(c.Operand).String()]) - case FieldIntIn: - candidates = append(candidates, fdb.flows[ts][rtr].IntIn[uint32(convert.Uint16b(c.Operand))]) - case FieldIntOut: - candidates = append(candidates, fdb.flows[ts][rtr].IntOut[uint32(convert.Uint16b(c.Operand))]) - case FieldNextHop: - candidates = append(candidates, 
fdb.flows[ts][rtr].NextHop[net.IP(c.Operand).String()]) - case FieldSrcAs: - candidates = append(candidates, fdb.flows[ts][rtr].SrcAs[convert.Uint32b(c.Operand)]) - case FieldDstAs: - candidates = append(candidates, fdb.flows[ts][rtr].DstAs[convert.Uint32b(c.Operand)]) - case FieldNextHopAs: - candidates = append(candidates, fdb.flows[ts][rtr].NextHopAs[convert.Uint32b(c.Operand)]) - case FieldSrcPort: - candidates = append(candidates, fdb.flows[ts][rtr].SrcPort[uint32(convert.Uint16b(c.Operand))]) - case FieldDstPort: - candidates = append(candidates, fdb.flows[ts][rtr].DstPort[uint32(convert.Uint16b(c.Operand))]) - case FieldSrcPfx: - candidates = append(candidates, fdb.flows[ts][rtr].SrcPfx[string(c.Operand)]) - case FieldDstPfx: - candidates = append(candidates, fdb.flows[ts][rtr].DstPfx[string(c.Operand)]) - } - } - - if len(candidates) == 0 { - candidates = append(candidates, fdb.flows[ts][rtr].Any[0]) - } - fdb.lock.RUnlock() - - go func(candidates []*avltree.Tree, ch chan map[string]uint64, ts int64) { - if fdb.debug > 1 { - glog.Infof("candidate trees: %d (%d)", len(candidates), ts) - } + return +} - // Find common elements of candidate trees - res := avltree.Intersection(candidates) - if res == nil { - glog.Warningf("Interseciton Result was empty!") - res = fdb.flows[ts][rtr].Any[0] - } +func (fdb *FlowDatabase) getResultByTS(resSum *concurrentResSum, ts int64, q *Query, rtr string) BreakdownMap { + // timeslot in memory? 
+ fdb.lock.RLock() + timeGroups, ok := fdb.flows[ts] + fdb.lock.RUnlock() - // Breakdown - resTime := make(map[string]uint64) - res.Each(breakdown, q.Breakdown, resSum, resTime) - ch <- resTime - }(candidates, resChannels[ts], ts) + if !ok { + // not in memory, try to load from disk + result, _ := fdb.loadFromDisc(ts, rtr, *q, resSum) + return result } - // Reading results from go routines - glog.Infof("Awaiting results from go routines") - for ts := start; ts < end; ts += fdb.aggregation { - glog.Infof("Waiting for results for ts %d", ts) - resTime[ts] = <-resChannels[ts] + if timeGroups[rtr] == nil { + log.Infof("TG of %s is nil", rtr) + return map[BreakdownKey]uint64{} } - glog.Infof("Done reading results") - // Build list of all keys - keys := make([]string, 0) + return timeGroups[rtr].filterAndBreakdown(resSum, q, fdb.iana, fdb.intfMapper.GetInterfaceNameByID(rtr)) +} +func (fdb *FlowDatabase) getTopKeys(resSum *concurrentResSum, topN int) map[BreakdownKey]void { // Build Tree Bytes -> Key to allow efficient finding of top n flows var btree = avltree.New() for k, b := range resSum.Values { - keys = append(keys, k) btree.Insert(b, k, uint64IsSmaller) } // Find top n keys - topKeysList := btree.TopN(q.TopN) - topKeys := make(map[string]int) + topKeysList := btree.TopN(topN) + topKeys := make(map[BreakdownKey]void) for _, v := range topKeysList { - topKeys[v.(string)] = 1 + topKeys[v.(BreakdownKey)] = void{} } - // Find all timestamps we have and get them sorted - tsTree := avltree.New() - for ts := range resTime { - tsTree.Insert(ts, ts, int64IsSmaller) - } - timestamps := tsTree.Dump() + return topKeys +} - queryResult := make([][]string, 0) +// RunQuery executes a query and returns the result +func (fdb *FlowDatabase) RunQuery(q *Query) (*Result, error) { + queryStart := time.Now() + stats.GlobalStats.Queries++ - // Construct table header - headLine := make([]string, 0) - headLine = append(headLine, "Time") - for _, k := range topKeysList { - headLine = 
append(headLine, k.(string)) + start, end, err := fdb.getStartEndTimes(q) + if err != nil { + return nil, errors.Wrap(err, "Failed to Start/End times") } - headLine = append(headLine, "Rest") - queryResult = append(queryResult, headLine) - - for _, ts := range timestamps { - line := make([]string, 0) - t := time.Unix(ts.(int64), 0) - line = append(line, fmt.Sprintf("%02d:%02d:%02d", t.Hour(), t.Minute(), t.Second())) - - // Top flows - buckets := resTime[ts.(int64)] - for _, k := range topKeysList { - if _, ok := buckets[k.(string)]; !ok { - line = append(line, "0") - } else { - line = append(line, fmt.Sprintf("%d", buckets[k.(string)]/uint64(fdb.aggregation)*8*uint64(fdb.samplerate))) - } - } - // Rest - var rest uint64 - for k, v := range buckets { - if _, ok := topKeys[k]; ok { - continue - } - rest += v - } - line = append(line, fmt.Sprintf("%d", rest)) - queryResult = append(queryResult, line) + rtr, err := fdb.getAgent(q) + if err != nil { + return nil, errors.Wrap(err, "Failed to get router") } - glog.Infof("Query %s took %d ns\n", query, time.Since(queryStart)) - return queryResult, nil -} - -// breakdown build all possible relevant keys of flows for flows in tree `node` -// and builds sums for each key in order to allow us to find top combinations -func breakdown(node *avltree.TreeNode, vals ...interface{}) { - if len(vals) != 3 { - glog.Errorf("lacking arguments") - return + // resSum holds a sum per breakdown key over all timestamps + resSum := &concurrentResSum{ + Values: make(BreakdownMap), } - bd := vals[0].(BreakDownMap) - sums := vals[1].(*concurrentResSum) - buckets := vals[2].(map[string]uint64) - fl := node.Value.(*netflow.Flow) - - // Build format string to build key - srcAddr := "_" - dstAddr := "_" - protocol := "_" - intIn := "_" - intOut := "_" - nextHop := "_" - srcAs := "_" - dstAs := "_" - nextHopAs := "_" - srcPfx := "_" - dstPfx := "_" - srcPort := "_" - dstPort := "_" - - if bd.SrcAddr { - srcAddr = fmt.Sprintf("Src:%s", 
net.IP(fl.SrcAddr).String()) - } - if bd.DstAddr { - dstAddr = fmt.Sprintf("Dst:%s", net.IP(fl.DstAddr).String()) - } - if bd.Protocol { - protocol = fmt.Sprintf("Proto:%d", fl.Protocol) - } - if bd.IntIn { - intIn = fmt.Sprintf("IntIn:%d", fl.IntIn) - } - if bd.IntOut { - intOut = fmt.Sprintf("IntOut:%d", fl.IntOut) - } - if bd.NextHop { - nextHop = fmt.Sprintf("NH:%s", net.IP(fl.NextHop).String()) - } - if bd.SrcAsn { - srcAs = fmt.Sprintf("SrcAS:%d", fl.SrcAs) - } - if bd.DstAsn { - dstAs = fmt.Sprintf("DstAS:%d", fl.DstAs) - } - if bd.NextHopAsn { - nextHopAs = fmt.Sprintf("NH_AS:%d", fl.NextHopAs) - } - if bd.SrcPfx { - if fl.SrcPfx != nil { - pfx := net.IPNet{ - IP: fl.SrcPfx.IP, - Mask: fl.SrcPfx.Mask, - } - srcPfx = fmt.Sprintf("SrcNet:%s", pfx.String()) - } else { - srcPfx = fmt.Sprintf("SrcNet:0.0.0.0/0") - } - } - if bd.DstPfx { - if fl.DstPfx != nil { - pfx := net.IPNet{ - IP: fl.DstPfx.IP, - Mask: fl.DstPfx.Mask, + // resTime holds individual sums per breakdown key and ts + resTime := make(map[int64]BreakdownMap) + resMtx := sync.Mutex{} + resWg := sync.WaitGroup{} + + for ts := start; ts <= end; ts += fdb.aggregation { + log.Infof("RunQuery: start timeslot %d", ts) + resWg.Add(1) + go func(ts int64) { + result := fdb.getResultByTS(resSum, ts, q, rtr) + + if result != nil { + log.Infof("RunQuery: data in timeslot %d", ts) + resMtx.Lock() + resTime[ts] = result + resMtx.Unlock() } - dstPfx = fmt.Sprintf("DstNet:%s", pfx.String()) - } else { - dstPfx = fmt.Sprintf("DstNet:0.0.0.0/0") - } - } - if bd.SrcPort { - srcPort = fmt.Sprintf("SrcPort:%d", fl.SrcPort) + resWg.Done() + }(ts) } - if bd.DstPort { - dstPort = fmt.Sprintf("DstPort:%d", fl.DstPort) - } - - // Build key - key := fmt.Sprintf("%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s", srcAddr, dstAddr, protocol, intIn, intOut, nextHop, srcAs, dstAs, nextHopAs, srcPfx, dstPfx, srcPort, dstPort) - // Remove underscores from key - key = strings.Replace(key, ",_,", ",", -1) - key = strings.Replace(key, "_,", 
"", -1) - key = strings.Replace(key, ",_", "", -1) + resWg.Wait() - // Remove leading and trailing commas - parts := strings.Split(key, "") - first := 0 - last := len(parts) - 1 - if parts[0] == "," { - first++ - } - if parts[last] == "," { - last-- + // Find all timestamps we have and get them sorted + tsTree := avltree.New() + for ts := range resTime { + tsTree.Insert(ts, ts, int64IsSmaller) } - key = strings.Join(parts[first:last+1], "") - // Build sum for key - if _, ok := buckets[key]; !ok { - buckets[key] = fl.Size - } else { - buckets[key] += fl.Size + // Generate topKeys if required + var topKeys map[BreakdownKey]void + if q.TopN > 0 { + topKeys = fdb.getTopKeys(resSum, q.TopN) } - // Build overall sum - sums.Lock.Lock() - if _, ok := sums.Values[key]; !ok { - sums.Values[key] = fl.Size - } else { - sums.Values[key] += fl.Size + timestamps := make([]int64, 0) + for _, ts := range tsTree.Dump() { + timestamps = append(timestamps, ts.(int64)) } - sums.Lock.Unlock() + + log.Infof("Query %v took %d ns\n", q, time.Since(queryStart)) + + return &Result{ + TopKeys: topKeys, + Timestamps: timestamps, + Data: resTime, + Aggregation: fdb.aggregation, + }, nil } diff --git a/database/database_test.go b/database/database_test.go new file mode 100644 index 0000000..a0c8156 --- /dev/null +++ b/database/database_test.go @@ -0,0 +1,494 @@ +package database + +import ( + "fmt" + "net" + "testing" + "time" + + "github.com/stretchr/testify/assert" + + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/iana" + "github.com/bio-routing/tflow2/intfmapper" + "github.com/bio-routing/tflow2/netflow" +) + +type intfMapper struct { +} + +func (m *intfMapper) GetInterfaceIDByName(agent string) intfmapper.InterfaceIDByName { + return intfmapper.InterfaceIDByName{ + "xe-0/0/1": 1, + "xe-0/0/2": 2, + "xe-0/0/3": 3, + } +} + +func (m *intfMapper) GetInterfaceNameByID(agent string) intfmapper.InterfaceNameByID { + return intfmapper.InterfaceNameByID{ + 1: "xe-0/0/1", + 
2: "xe-0/0/2", + 3: "xe-0/0/3", + } +} + +func TestQuery(t *testing.T) { + minute := int64(60) + hour := int64(3600) + + ts1 := int64(3600) + ts1 = ts1 - ts1%minute + + tests := []struct { + name string + flows []*netflow.Flow + query *Query + expectedResult Result + }{ + { + // Testcase: 2 flows from AS100 to AS300 and back (TCP session). + name: "Test 1", + flows: []*netflow.Flow{ + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{10, 0, 0, 1}, + DstAddr: []byte{30, 0, 0, 1}, + Protocol: 6, + SrcPort: 12345, + DstPort: 443, + Packets: 2, + Size: 1000, + IntIn: 1, + IntOut: 3, + NextHop: []byte{30, 0, 0, 100}, + SrcAs: 100, + DstAs: 300, + NextHopAs: 300, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{10, 0, 0, 1}, + DstAddr: []byte{30, 0, 0, 2}, + Protocol: 6, + SrcPort: 12345, + DstPort: 443, + Packets: 2, + Size: 1000, + IntIn: 1, + IntOut: 3, + NextHop: []byte{30, 0, 0, 100}, + SrcAs: 100, + DstAs: 300, + NextHopAs: 300, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{30, 0, 0, 1}, + DstAddr: []byte{10, 0, 0, 1}, + Protocol: 6, + SrcPort: 443, + DstPort: 12345, + Packets: 5, + Size: 10000, + IntIn: 3, + IntOut: 1, + NextHop: []byte{10, 0, 0, 100}, + SrcAs: 300, + DstAs: 100, + NextHopAs: 100, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{30, 0, 0, 2}, + DstAddr: []byte{10, 0, 0, 1}, + Protocol: 6, + SrcPort: 443, + DstPort: 12345, + Packets: 5, + Size: 10000, + IntIn: 3, + IntOut: 1, + NextHop: []byte{10, 0, 0, 100}, + SrcAs: 300, + DstAs: 100, + NextHopAs: 100, + Samplerate: 4, + Timestamp: ts1, + }, + }, + query: &Query{ + Cond: []Condition{ + { + Field: FieldAgent, + Operator: OpEqual, + Operand: []byte("test01.pop01"), + }, + { + Field: FieldTimestamp, + Operator: OpGreater, + Operand: convert.Uint64Byte(uint64(ts1 - 3*minute)), + }, + { + Field: FieldTimestamp, + 
Operator: OpSmaller, + Operand: convert.Uint64Byte(uint64(ts1 + minute)), + }, + { + Field: FieldIntOut, + Operator: OpEqual, + Operand: convert.Uint16Byte(uint16(1)), + }, + }, + Breakdown: BreakdownFlags{ + SrcAddr: true, + DstAddr: true, + }, + TopN: 100, + }, + expectedResult: Result{ + TopKeys: map[BreakdownKey]void{ + { + FieldSrcAddr: "30.0.0.1", + FieldDstAddr: "10.0.0.1", + }: {}, + { + FieldSrcAddr: "30.0.0.2", + FieldDstAddr: "10.0.0.1", + }: {}, + }, + Timestamps: []int64{ + ts1, + }, + Data: map[int64]BreakdownMap{ + ts1: { + BreakdownKey{ + FieldSrcAddr: "30.0.0.1", + FieldDstAddr: "10.0.0.1", + }: 40000, + BreakdownKey{ + FieldSrcAddr: "30.0.0.2", + FieldDstAddr: "10.0.0.1", + }: 40000, + }, + }, + Aggregation: minute, + }, + }, + + { + // Testcase: 2 flows from AS100 to AS300 and back (TCP session). + // Opposite direction of Test 1 + name: "Test 2", + flows: []*netflow.Flow{ + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{10, 0, 0, 1}, + DstAddr: []byte{30, 0, 0, 1}, + Protocol: 6, + SrcPort: 12345, + DstPort: 443, + Packets: 2, + Size: 1000, + IntIn: 1, + IntOut: 3, + NextHop: []byte{30, 0, 0, 100}, + SrcAs: 100, + DstAs: 300, + NextHopAs: 300, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{10, 0, 0, 1}, + DstAddr: []byte{30, 0, 0, 2}, + Protocol: 6, + SrcPort: 12345, + DstPort: 443, + Packets: 2, + Size: 1000, + IntIn: 1, + IntOut: 3, + NextHop: []byte{30, 0, 0, 100}, + SrcAs: 100, + DstAs: 300, + NextHopAs: 300, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{30, 0, 0, 1}, + DstAddr: []byte{10, 0, 0, 1}, + Protocol: 6, + SrcPort: 443, + DstPort: 12345, + Packets: 5, + Size: 10000, + IntIn: 3, + IntOut: 1, + NextHop: []byte{10, 0, 0, 100}, + SrcAs: 300, + DstAs: 100, + NextHopAs: 100, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{30, 0, 0, 2}, + DstAddr: 
[]byte{10, 0, 0, 1}, + Protocol: 6, + SrcPort: 443, + DstPort: 12345, + Packets: 5, + Size: 10000, + IntIn: 3, + IntOut: 1, + NextHop: []byte{10, 0, 0, 100}, + SrcAs: 300, + DstAs: 100, + NextHopAs: 100, + Samplerate: 4, + Timestamp: ts1, + }, + }, + query: &Query{ + Cond: []Condition{ + { + Field: FieldAgent, + Operator: OpEqual, + Operand: []byte("test01.pop01"), + }, + { + Field: FieldTimestamp, + Operator: OpGreater, + Operand: convert.Uint64Byte(uint64(ts1 - 3*minute)), + }, + { + Field: FieldTimestamp, + Operator: OpSmaller, + Operand: convert.Uint64Byte(uint64(ts1 + minute)), + }, + { + Field: FieldIntOut, + Operator: OpEqual, + Operand: convert.Uint16Byte(uint16(3)), + }, + }, + Breakdown: BreakdownFlags{ + SrcAddr: true, + DstAddr: true, + }, + TopN: 100, + }, + expectedResult: Result{ + TopKeys: map[BreakdownKey]void{ + { + FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.1", + }: {}, + { + FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.2", + }: {}, + }, + Timestamps: []int64{ + ts1, + }, + Data: map[int64]BreakdownMap{ + ts1: { + BreakdownKey{ + FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.1", + }: 4000, + BreakdownKey{ + FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.2", + }: 4000, + }, + }, + Aggregation: minute, + }, + }, + + { + // Testcase: 2 flows from AS100 to AS300 and back (TCP session). 
+ // Test TopN function + name: "Test 3", + flows: []*netflow.Flow{ + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{10, 0, 0, 1}, + DstAddr: []byte{30, 0, 0, 1}, + Protocol: 6, + SrcPort: 12345, + DstPort: 443, + Packets: 2, + Size: 1001, + IntIn: 1, + IntOut: 3, + NextHop: []byte{30, 0, 0, 100}, + SrcAs: 100, + DstAs: 300, + NextHopAs: 300, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{10, 0, 0, 1}, + DstAddr: []byte{30, 0, 0, 2}, + Protocol: 6, + SrcPort: 12345, + DstPort: 443, + Packets: 2, + Size: 1000, + IntIn: 1, + IntOut: 3, + NextHop: []byte{30, 0, 0, 100}, + SrcAs: 100, + DstAs: 300, + NextHopAs: 300, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{30, 0, 0, 1}, + DstAddr: []byte{10, 0, 0, 1}, + Protocol: 6, + SrcPort: 443, + DstPort: 12345, + Packets: 5, + Size: 10000, + IntIn: 3, + IntOut: 1, + NextHop: []byte{10, 0, 0, 100}, + SrcAs: 300, + DstAs: 100, + NextHopAs: 100, + Samplerate: 4, + Timestamp: ts1, + }, + { + Router: []byte{1, 2, 3, 4}, + Family: 4, + SrcAddr: []byte{30, 0, 0, 2}, + DstAddr: []byte{10, 0, 0, 1}, + Protocol: 6, + SrcPort: 443, + DstPort: 12345, + Packets: 5, + Size: 10000, + IntIn: 3, + IntOut: 1, + NextHop: []byte{10, 0, 0, 100}, + SrcAs: 300, + DstAs: 100, + NextHopAs: 100, + Samplerate: 4, + Timestamp: ts1, + }, + }, + query: &Query{ + Cond: []Condition{ + { + Field: FieldAgent, + Operator: OpEqual, + Operand: []byte("test01.pop01"), + }, + { + Field: FieldTimestamp, + Operator: OpGreater, + Operand: convert.Uint64Byte(uint64(ts1 - 3*minute)), + }, + { + Field: FieldTimestamp, + Operator: OpSmaller, + Operand: convert.Uint64Byte(uint64(ts1 + minute)), + }, + { + Field: FieldIntOut, + Operator: OpEqual, + Operand: convert.Uint16Byte(uint16(3)), + }, + }, + Breakdown: BreakdownFlags{ + SrcAddr: true, + DstAddr: true, + }, + TopN: 1, + }, + expectedResult: Result{ + TopKeys: map[BreakdownKey]void{ + { 
+ FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.1", + }: {}, + }, + Timestamps: []int64{ + ts1, + }, + Data: map[int64]BreakdownMap{ + ts1: { + BreakdownKey{ + FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.1", + }: 4004, + BreakdownKey{ + FieldSrcAddr: "10.0.0.1", + FieldDstAddr: "30.0.0.2", + }: 4000, + }, + }, + Aggregation: minute, + }, + }, + } + + for _, test := range tests { + fdb := New(minute, hour, 1, 0, 6, "", false, &intfMapper{}, map[string]string{ + net.IP([]byte{1, 2, 3, 4}).String(): "test01.pop01", + }, iana.New()) + + for _, flow := range test.flows { + fdb.Input <- flow + } + + time.Sleep(time.Second) + + result, err := fdb.RunQuery(test.query) + if err != nil { + t.Errorf("Unexpected error on RunQuery: %v", err) + } + + assert.Equal(t, test.expectedResult, *result, test.name) + } +} + +func dumpRes(res Result) { + for ts := range res.Data { + for k, v := range res.Data[ts] { + fmt.Printf("TS: %d\tKey: %v\t %d\n", ts, k, v) + } + } +} diff --git a/database/map_tree.go b/database/map_tree.go new file mode 100644 index 0000000..c4ef813 --- /dev/null +++ b/database/map_tree.go @@ -0,0 +1,61 @@ +package database + +import ( + "fmt" + "net" + "sync" + + "github.com/bio-routing/tflow2/avltree" + "github.com/bio-routing/tflow2/convert" +) + +type mapTree struct { + entries map[string]*avltree.Tree + sync.RWMutex +} + +func newMapTree() *mapTree { + return &mapTree{ + entries: make(map[string]*avltree.Tree), + } +} + +func createKey(key interface{}) string { + switch val := key.(type) { + case string: + return val + case []uint8: + return string(val) + case byte: + return string([]byte{val}) + case int64: + return string(convert.Int64Byte(val)) + case uint16: + return string(convert.Uint16Byte(val)) + case uint32: + return string(convert.Uint32Byte(val)) + case net.IP: + if addr := val.To4(); addr != nil { + return string(addr) + } + return string(val.To16()) + default: + panic(fmt.Sprintf("unsupported key type: %T", key)) + } +} + +func (m 
*mapTree) Insert(key interface{}, value interface{}) { + keyStr := createKey(key) + m.Lock() + root, ok := m.entries[keyStr] + if !ok { + root = avltree.New() + m.entries[keyStr] = root + } + root.Insert(value, value, ptrIsSmaller) + m.Unlock() +} + +func (m *mapTree) Get(key interface{}) *avltree.Tree { + return m.entries[createKey(key)] +} diff --git a/database/map_tree_test.go b/database/map_tree_test.go new file mode 100644 index 0000000..6ffe284 --- /dev/null +++ b/database/map_tree_test.go @@ -0,0 +1,33 @@ +package database + +import ( + "net" + "testing" + + "github.com/stretchr/testify/assert" +) + +func TestCreateKey(t *testing.T) { + assert := assert.New(t) + + assert.Equal("test", createKey("test")) + assert.Equal("\x17", createKey(byte(23))) + assert.Equal("\x00\x17", createKey(uint16(23))) + assert.Equal("\x17\x2a", createKey([]uint8{23, 42})) + assert.Equal("\x00\x00\x00\x00\x00\x00\x00\x17", createKey(int64(23))) + assert.Equal("\x00\x00\x00\x17", createKey(uint32(23))) + assert.Equal("\x01\x02\x03\x04", createKey(net.IP{1, 2, 3, 4})) + assert.Equal("\xfe\x80\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x00\x01", createKey(net.ParseIP("fe80::1"))) + + assert.PanicsWithValuef("unsupported key type: struct {}", func() { createKey(struct{}{}) }, "") +} + +func TestMapTree(t *testing.T) { + assert := assert.New(t) + + mapTree := newMapTree() + assert.Nil(mapTree.Get("foo")) + + mapTree.Insert("foo", "bar") + assert.NotNil(mapTree.Get("foo")) +} diff --git a/database/result.go b/database/result.go new file mode 100644 index 0000000..172e44b --- /dev/null +++ b/database/result.go @@ -0,0 +1,59 @@ +package database + +import ( + "encoding/csv" + "fmt" + "io" + "time" +) + +// Result is the result of a query +type Result struct { + TopKeys map[BreakdownKey]void + Timestamps []int64 // sorted timestamps + Data map[int64]BreakdownMap // timestamps -> keys -> values + Aggregation int64 +} + +// WriteCSV writes the result as CSV into the writer +func (res 
*Result) WriteCSV(writer io.Writer) { + w := csv.NewWriter(writer) + defer w.Flush() + + // Construct table header + headLine := make([]string, 0) + headLine = append(headLine, "Time") + topKeys := make([]BreakdownKey, 0) + + for k := range res.TopKeys { + topKeys = append(topKeys, k) + headLine = append(headLine, k.Join("%s:%s")) + } + headLine = append(headLine, "Rest") + w.Write(headLine) + + for _, ts := range res.Timestamps { + line := make([]string, 0) + t := time.Unix(ts, 0) + line = append(line, fmt.Sprintf("%02d:%02d:%02d", t.Hour(), t.Minute(), t.Second())) + + // Top flows + buckets := res.Data[ts] + for _, k := range topKeys { + if _, ok := buckets[k]; !ok { + line = append(line, "0") + } else { + line = append(line, fmt.Sprintf("%d", buckets[k]/uint64(res.Aggregation)*8)) + } + } + + // Remaining flows + var rest uint64 + for k, v := range buckets { + if _, ok := res.TopKeys[k]; !ok { + rest += v + } + } + w.Write(append(line, fmt.Sprintf("%d", rest))) + } +} diff --git a/database/time_group.go b/database/time_group.go new file mode 100644 index 0000000..1b8a47c --- /dev/null +++ b/database/time_group.go @@ -0,0 +1,92 @@ +package database + +import ( + "net" + + "github.com/bio-routing/tflow2/avltree" + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/iana" + "github.com/bio-routing/tflow2/intfmapper" + + log "github.com/sirupsen/logrus" +) + +// TimeGroup groups all indices to flows of a particular router at a particular +// time into one object +type TimeGroup struct { + Any *mapTree // Workaround: Why a map? 
Because: cannot assign to flows[fl.Timestamp][rtr].Any + SrcAddr *mapTree + DstAddr *mapTree + Protocol *mapTree + IntIn *mapTree + IntOut *mapTree + NextHop *mapTree + SrcAs *mapTree + DstAs *mapTree + NextHopAs *mapTree + SrcPfx *mapTree + DstPfx *mapTree + SrcPort *mapTree + DstPort *mapTree + InterfaceIDByName intfmapper.InterfaceIDByName +} + +func (tg *TimeGroup) filterAndBreakdown(resSum *concurrentResSum, q *Query, iana *iana.IANA, intfMap intfmapper.InterfaceNameByID) BreakdownMap { + // candidates keeps a list of all trees that fulfill the queries criteria + candidates := make([]*avltree.Tree, 0) + for _, c := range q.Cond { + switch c.Field { + case FieldTimestamp: + continue + case FieldAgent: + continue + case FieldProtocol: + candidates = append(candidates, tg.Protocol.Get(c.Operand[0])) + case FieldSrcAddr: + candidates = append(candidates, tg.SrcAddr.Get(net.IP(c.Operand))) + case FieldDstAddr: + candidates = append(candidates, tg.DstAddr.Get(net.IP(c.Operand))) + case FieldIntIn: + candidates = append(candidates, tg.IntIn.Get(convert.Uint16b(c.Operand))) + case FieldIntOut: + candidates = append(candidates, tg.IntOut.Get(convert.Uint16b(c.Operand))) + case FieldNextHop: + candidates = append(candidates, tg.NextHop.Get(net.IP(c.Operand))) + case FieldSrcAs: + candidates = append(candidates, tg.SrcAs.Get(convert.Uint32b(c.Operand))) + case FieldDstAs: + candidates = append(candidates, tg.DstAs.Get(convert.Uint32b(c.Operand))) + case FieldNextHopAs: + candidates = append(candidates, tg.NextHopAs.Get(convert.Uint32b(c.Operand))) + case FieldSrcPort: + candidates = append(candidates, tg.SrcPort.Get(c.Operand)) + case FieldDstPort: + candidates = append(candidates, tg.DstPort.Get(c.Operand)) + case FieldSrcPfx: + candidates = append(candidates, tg.SrcPfx.Get(c.Operand)) + case FieldDstPfx: + candidates = append(candidates, tg.DstPfx.Get(c.Operand)) + case FieldIntInName: + intID := tg.InterfaceIDByName[string(c.Operand)] + candidates = append(candidates, 
tg.IntIn.Get(intID)) + case FieldIntOutName: + intID := tg.InterfaceIDByName[string(c.Operand)] + candidates = append(candidates, tg.IntOut.Get(intID)) + } + } + + if len(candidates) == 0 { + candidates = append(candidates, tg.Any.Get(anyIndex)) + } + + // Find common elements of candidate trees + res := avltree.Intersection(candidates) + if res == nil { + log.Warningf("Intersection result was empty!") + } + + // Breakdown + resTime := make(BreakdownMap) + res.Each(breakdown, intfMap, iana, q.Breakdown, resSum, resTime) + return resTime +} diff --git a/frontend/frontend.go b/frontend/frontend.go index 23c315d..b974c5f 100644 --- a/frontend/frontend.go +++ b/frontend/frontend.go @@ -13,40 +13,43 @@ package frontend import ( - "bufio" - "encoding/csv" "encoding/json" "fmt" - "io" "io/ioutil" "net/http" _ "net/http/pprof" // Needed for profiling only - "net/url" - "os" - "regexp" "strings" - "github.com/google/tflow2/database" - "github.com/google/tflow2/stats" - "github.com/golang/glog" + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/database" + "github.com/bio-routing/tflow2/iana" + "github.com/bio-routing/tflow2/intfmapper" + "github.com/bio-routing/tflow2/stats" + + log "github.com/sirupsen/logrus" ) // Frontend represents the web interface type Frontend struct { - protocols map[string]string - indexHTML string - flowDB *database.FlowDatabase + protocols map[string]string + indexHTML string + flowDB *database.FlowDatabase + intfMapper *intfmapper.Mapper + iana *iana.IANA + config *config.Config } // New creates a new `Frontend` -func New(addr string, protoNumsFilename string, fdb *database.FlowDatabase) *Frontend { +func New(fdb *database.FlowDatabase, intfMapper *intfmapper.Mapper, iana *iana.IANA, config *config.Config) *Frontend { fe := &Frontend{ - flowDB: fdb, + flowDB: fdb, + intfMapper: intfMapper, + iana: iana, + config: config, } - fe.populateProtocols(protoNumsFilename) fe.populateIndexHTML() http.HandleFunc("/", fe.httpHandler) 
- go http.ListenAndServe(addr, nil) + go http.ListenAndServe(fe.config.Frontend.Listen, nil) return fe } @@ -54,36 +57,46 @@ func New(addr string, protoNumsFilename string, fdb *database.FlowDatabase) *Fro func (fe *Frontend) populateIndexHTML() { html, err := ioutil.ReadFile("tflow2.html") if err != nil { - glog.Errorf("Unable to read tflow2.html: %v", err) + log.Errorf("Unable to read tflow2.html: %v", err) return } fe.indexHTML = string(html) } -func (fe *Frontend) populateProtocols(protoNumsFilename string) { - f, err := os.Open(protoNumsFilename) - if err != nil { - glog.Errorf("Couldn't open protoNumsFile: %v\n", err) - return +func (fe *Frontend) agentsHandler(w http.ResponseWriter, r *http.Request) { + type routerJSON struct { + Name string + Interfaces []string + } + type routersJSON struct { + Agents []routerJSON + } + + data := routersJSON{ + Agents: make([]routerJSON, 0), } - r := csv.NewReader(bufio.NewReader(f)) - fe.protocols = make(map[string]string) - for { - record, err := r.Read() - if err == io.EOF { - break - } - ok, err := regexp.Match("^[0-9]{1,3}$", []byte(record[0])) - if err != nil { - fmt.Printf("Regex: %v\n", err) - continue + for _, agent := range fe.config.Agents { + a := routerJSON{ + Name: agent.Name, + Interfaces: make([]string, 0), } - if ok { - fe.protocols[record[0]] = record[1] + + intfmap := fe.intfMapper.GetInterfaceIDByName(a.Name) + for name := range intfmap { + a.Interfaces = append(a.Interfaces, name) } + + data.Agents = append(data.Agents, a) + } + + b, err := json.Marshal(data) + if err != nil { + http.Error(w, fmt.Sprintf("Marshal failed: %v", err), 500) } + + fmt.Fprintf(w, "%s", string(b)) } func (fe *Frontend) httpHandler(w http.ResponseWriter, r *http.Request) { @@ -96,23 +109,27 @@ func (fe *Frontend) httpHandler(w http.ResponseWriter, r *http.Request) { fe.indexHandler(w, r) case "/query": fe.queryHandler(w, r) - case "/varz": - stats.Varz(w) + case "/metrics": + stats.Metrics(w) case "/protocols": 
fe.getProtocols(w, r) - case "/routers": - fileHandler(w, r, "routers.json") + case "/promquery": + fe.prometheusHandler(w, r) + case "/agents": + fe.agentsHandler(w, r) case "/tflow2.css": fileHandler(w, r, "tflow2.css") case "/tflow2.js": fileHandler(w, r, "tflow2.js") + case "/papaparse.min.js": + fileHandler(w, r, "vendors/papaparse/papaparse.min.js") } } func (fe *Frontend) getProtocols(w http.ResponseWriter, r *http.Request) { - output, err := json.Marshal(fe.protocols) + output, err := json.Marshal(fe.iana.GetIPProtocolsByName()) if err != nil { - glog.Warningf("Unable to marshal: %v", err) + log.Warningf("Unable to marshal: %v", err) http.Error(w, "Unable to marshal data", 500) } fmt.Fprintf(w, "%s", output) @@ -121,35 +138,16 @@ func (fe *Frontend) getProtocols(w http.ResponseWriter, r *http.Request) { func fileHandler(w http.ResponseWriter, r *http.Request, filename string) { content, err := ioutil.ReadFile(filename) if err != nil { - glog.Warningf("Unable to read file: %v", err) + log.Warningf("Unable to read file: %v", err) http.Error(w, "Unable to read file", 404) } fmt.Fprintf(w, "%s", string(content)) } func (fe *Frontend) indexHandler(w http.ResponseWriter, r *http.Request) { - query := "{}" - for _, p := range strings.Split(r.URL.RawQuery, "&") { - parts := strings.SplitN(p, "=", 2) - if len(parts) == 0 { - glog.Warningf("query was empty") - http.Error(w, "query was empty", 400) - continue - } - param := parts[0] - value := "" - if len(parts) == 2 { - value = parts[1] - } - - if param == "query" { - var err error - query, err = url.QueryUnescape(value) - if err != nil { - glog.Warningf("unable to decode URL parameter query") - http.Error(w, "unable to decode URL parameter query", 503) - } - } + query := r.URL.Query().Get("query") + if query == "" { + query = "{}" } output := strings.Replace(fe.indexHTML, "VAR_QUERY", query, -1) @@ -158,75 +156,27 @@ func (fe *Frontend) indexHandler(w http.ResponseWriter, r *http.Request) { func (fe *Frontend) 
queryHandler(w http.ResponseWriter, r *http.Request) { w.Header().Set("Access-Control-Allow-Origin", "*") - q := "" - for _, p := range strings.Split(r.URL.RawQuery, "&") { - parts := strings.SplitN(p, "=", 2) - param := parts[0] - value := "" - if len(parts) > 1 { - value = parts[1] - } - if param == "q" { - var err error - q, err = url.QueryUnescape(value) - if err != nil { - glog.Warningf("Unable to unescape query: %v", err) - http.Error(w, "Unable to unescape query", 400) - } + query, errors := fe.translateQuery(r.URL.Query()) + if errors != nil { + http.Error(w, "Unable to parse query:", 422) + for _, err := range errors { + fmt.Fprintln(w, err.Error()) } + return } - result, err := fe.flowDB.RunQuery(q) + result, err := fe.flowDB.RunQuery(&query) if err != nil { - glog.Errorf("Query failed: %v", err) - http.Error(w, "Query failed", 500) - } - - fe.printResult(w, result) -} - -func (fe *Frontend) printResult(w http.ResponseWriter, result [][]string) { - rows := len(result) - if rows == 0 { + http.Error(w, fmt.Sprintf("Query failed: %v", err), 500) return } - columns := len(result[0]) - - fmt.Fprintf(w, "[\n") - fmt.Fprintf(w, "[ ") - // Send header of table to client - for i, val := range result[0] { - if i < columns-1 { - fmt.Fprintf(w, "\"%s\", ", string(val)) - continue - } - fmt.Fprintf(w, "\"%s\"", string(val)) - } - if rows == 1 { - fmt.Fprintf(w, "]\n") + + if len(result.Data) == 0 { + w.WriteHeader(http.StatusNoContent) return } - fmt.Fprintf(w, "],\n") - - for i, row := range result[1:] { - fmt.Fprintf(w, "[ ") - for j, column := range row { - if j == 0 { - fmt.Fprintf(w, "\"%s\", ", string(column)) - continue - } - if j < columns-1 { - fmt.Fprintf(w, "%s, ", string(column)) - continue - } - fmt.Fprintf(w, "%s", string(column)) - } - if i < rows-2 { - fmt.Fprintf(w, "],\n") - continue - } - fmt.Fprintf(w, "]\n") - } - fmt.Fprintf(w, "]") + + w.Header().Set("Content-Type", "text/csv") + result.WriteCSV(w) } diff --git a/frontend/prometheus.go 
b/frontend/prometheus.go new file mode 100644 index 0000000..3c94bb8 --- /dev/null +++ b/frontend/prometheus.go @@ -0,0 +1,89 @@ +package frontend + +import ( + "fmt" + "net/http" + + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/database" +) + +func (fe *Frontend) prometheusHandler(w http.ResponseWriter, r *http.Request) { + fmt.Printf("r.URL.Query(): %s\n", r.URL.Query()) + query, errors := fe.translateQuery(r.URL.Query()) + if errors != nil { + http.Error(w, "Unable to parse query:", 422) + for _, err := range errors { + fmt.Fprintln(w, err.Error()) + } + return + } + + if query.Breakdown.Count() == 0 { + http.Error(w, "Breakdown parameter missing. Please pass a comma separated list of:", 422) + for _, label := range database.GetBreakdownLabels() { + fmt.Fprintf(w, "- %s\n", label) + } + return + } + + if !query.Cond.Includes(database.FieldTimestamp, database.OpEqual) { + // Select most recent complete timeslot + ts := fe.flowDB.CurrentTimeslot() - fe.flowDB.AggregationPeriod() + query.Cond = append(query.Cond, database.Condition{ + Field: database.FieldTimestamp, + Operator: database.OpEqual, + Operand: convert.Int64Byte(ts), + }) + } + + // Run the query + result, err := fe.flowDB.RunQuery(&query) + if err != nil { + http.Error(w, "Query failed: "+err.Error(), http.StatusInternalServerError) + return + } + + // Empty result? 
+ if len(result.Timestamps) == 0 { + w.WriteHeader(http.StatusNoContent) + return + } + + // Hints for Prometheus + fmt.Fprintln(w, "# HELP tflow_bytes Bytes transmitted") + fmt.Fprintln(w, "# TYPE tflow_bytes gauge") + + ts := result.Timestamps[0] + + // Print the data + if len(result.TopKeys) > 0 { + for key := range result.TopKeys { + if _, ok := result.Data[ts][key]; ok { + fmt.Fprintf(w, "tflow_bytes{agent=%q,%s} %d\n", getAgent(query), formatBreakdownKey(&key), result.Data[ts][key]) + } + } + } else { + for key, val := range result.Data[ts] { + fmt.Fprintf(w, "tflow_bytes{agent=%q,%s} %d\n", getAgent(query), formatBreakdownKey(&key), val) + } + } +} + +func getAgent(q database.Query) string { + for _, c := range q.Cond { + if c.Field != database.FieldAgent { + continue + } + + return string(c.Operand) + } + + return "" +} + +// formats a breakdown key for prometheus +// see tests for examples +func formatBreakdownKey(key *database.BreakdownKey) string { + return key.Join(`%s="%s"`) +} diff --git a/frontend/prometheus_test.go b/frontend/prometheus_test.go new file mode 100644 index 0000000..da93aa7 --- /dev/null +++ b/frontend/prometheus_test.go @@ -0,0 +1,21 @@ +package frontend + +import ( + "testing" + + "github.com/bio-routing/tflow2/database" + "github.com/stretchr/testify/assert" +) + +func TestFormatBreakdownKey(t *testing.T) { + assert := assert.New(t) + + // empty key + assert.Equal("", formatBreakdownKey(&database.BreakdownKey{})) + + // key with two values + key := &database.BreakdownKey{} + key[database.FieldFamily] = "4" + key[database.FieldDstPfx] = "foo" + assert.Equal(`Family="4",DstPfx="foo"`, formatBreakdownKey(key)) +} diff --git a/frontend/translate.go b/frontend/translate.go new file mode 100644 index 0000000..6537518 --- /dev/null +++ b/frontend/translate.go @@ -0,0 +1,118 @@ +package frontend + +import ( + "net" + "net/url" + "strconv" + "strings" + + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/database" + 
"github.com/pkg/errors" +) + +func (fe *Frontend) translateCondition(field, value string) (*database.Condition, error) { + var operatorStr string + + // Extract operator if included in field name + i := strings.IndexRune(field, '.') + if i > 0 { + operatorStr = field[i+1:] + field = field[:i] + } + + var operand []byte + var operator int + fieldNum := database.GetFieldByName(field) + + switch fieldNum { + case database.FieldTimestamp: + op, err := strconv.Atoi(value) + if err != nil { + return nil, err + } + operand = convert.Int64Byte(int64(op)) + + case database.FieldProtocol: + id, err := strconv.Atoi(value) + operand = convert.Uint8Byte(uint8(id)) + if err != nil { + protocolsByName := fe.iana.GetIPProtocolsByName() + operand = convert.Uint8Byte(protocolsByName[value]) + } + + case database.FieldSrcPort, database.FieldDstPort, database.FieldIntIn, database.FieldIntOut: + op, err := strconv.Atoi(value) + if err != nil { + return nil, err + } + operand = convert.Uint16Byte(uint16(op)) + + case database.FieldSrcAddr, database.FieldDstAddr, database.FieldNextHop: + operand = convert.IPByteSlice(value) + + case database.FieldSrcAs, database.FieldDstAs, database.FieldNextHopAs: + op, err := strconv.Atoi(value) + if err != nil { + return nil, err + } + operand = convert.Uint32Byte(uint32(op)) + + case database.FieldSrcPfx, database.FieldDstPfx: + _, pfx, err := net.ParseCIDR(string(value)) + if err != nil { + return nil, err + } + operand = []byte(pfx.String()) + + case database.FieldIntInName, database.FieldIntOutName, database.FieldAgent: + operand = []byte(value) + + default: + return nil, errors.Errorf("unknown field: %s", field) + } + + switch operatorStr { + case "eq", "": + operator = database.OpEqual + case "ne": + operator = database.OpUnequal + case "gt": + operator = database.OpGreater + case "lt": + operator = database.OpSmaller + default: + return nil, errors.Errorf("invalid operator: %s", operatorStr) + } + + return &database.Condition{ + Field: 
fieldNum, + Operator: operator, + Operand: operand, + }, nil +} + +// translateQuery translates URL parameters to the internal representation of a query +func (fe *Frontend) translateQuery(params url.Values) (q database.Query, errors []error) { + for key, values := range params { + var err error + value := values[0] + switch key { + case "TopN": + q.TopN, err = strconv.Atoi(value) + case "Breakdown": + err = q.Breakdown.Set(strings.Split(value, ",")) + default: + var cond *database.Condition + cond, err = fe.translateCondition(key, value) + if cond != nil { + q.Cond = append(q.Cond, *cond) + } + } + if err != nil { + errors = append(errors, err) + } + } + + return +} diff --git a/frontend/translate_test.go b/frontend/translate_test.go new file mode 100644 index 0000000..bfa6bf5 --- /dev/null +++ b/frontend/translate_test.go @@ -0,0 +1,83 @@ +package frontend + +import ( + "net/url" + "testing" + + "github.com/bio-routing/tflow2/database" + "github.com/stretchr/testify/assert" +) + +func TestTranslateCondition(t *testing.T) { + assert := assert.New(t) + + tests := []struct { + Key string + Value string + ExpectedField int + ExpectedOperator int + }{ + { + Key: "Timestamp.gt", + Value: "1503432000", + ExpectedField: database.FieldTimestamp, + ExpectedOperator: database.OpGreater, + }, + { + Key: "Timestamp.lt", + Value: "1503436000", + ExpectedField: database.FieldTimestamp, + ExpectedOperator: database.OpSmaller, + }, + { + Key: "Protocol.eq", + Value: "6", + ExpectedField: database.FieldProtocol, + ExpectedOperator: database.OpEqual, + }, + { + Key: "SrcAddr", + Value: "1.2.3.4", + ExpectedField: database.FieldSrcAddr, + ExpectedOperator: database.OpEqual, + }, + { + Key: "SrcAs", + Value: "5123", + ExpectedField: database.FieldSrcAs, + ExpectedOperator: database.OpEqual, + }, + { + Key: "SrcPfx", + Value: "10.8.0.0/16", + ExpectedField: database.FieldSrcPfx, + ExpectedOperator: database.OpEqual, + }, + } + + fe := Frontend{} + for _, test := range tests { + cond, 
err := fe.translateCondition(test.Key, test.Value) + assert.NoError(err) + assert.NotNil(cond) + assert.Equal(test.ExpectedField, cond.Field) + assert.Equal(test.ExpectedOperator, cond.Operator) + } + +} + +func TestTranslateQuery(t *testing.T) { + assert := assert.New(t) + fe := Frontend{} + + query, errors := fe.translateQuery(url.Values{"TopN": []string{"15"}}) + assert.Nil(errors) + assert.Equal(query.TopN, 15) + + query, errors = fe.translateQuery(url.Values{"Timestamp.lt": []string{"42"}, "Timestamp.gt": []string{"23"}}) + assert.Nil(errors) + assert.Len(query.Cond, 2) + + query, errors = fe.translateQuery(url.Values{"Unknown": []string{"foo"}}) + assert.EqualError(errors[0], "unknown field: Unknown") +} diff --git a/go.mod b/go.mod new file mode 100644 index 0000000..cd14c1c --- /dev/null +++ b/go.mod @@ -0,0 +1,17 @@ +module github.com/bio-routing/tflow2 + +require ( + github.com/bio-routing/bio-rd v0.0.0-20190818170353-d73bc83147be + github.com/golang/mock v1.2.0 // indirect + github.com/golang/protobuf v1.3.2 + github.com/pkg/errors v0.8.1 + github.com/prometheus/common v0.7.0 + github.com/sirupsen/logrus v1.4.2 + github.com/soniah/gosnmp v0.0.0-20181018115632-28507a583d6f + github.com/stretchr/testify v1.3.0 + golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 + google.golang.org/grpc v1.17.0 + gopkg.in/yaml.v2 v2.2.2 +) + +go 1.13 diff --git a/go.sum b/go.sum new file mode 100644 index 0000000..5203501 --- /dev/null +++ b/go.sum @@ -0,0 +1,123 @@ +cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw= +github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU= +github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751 h1:JYp7IbQjafoB+tBA3gMyHYHrpOtNuDiK/uB5uXxq5wM= +github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod 
h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc= +github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4 h1:Hs82Z41s6SdL1CELW+XaDYmOH4hkBN4/N9og/AsOv7E= +github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0= +github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q= +github.com/beorn7/perks v1.0.0 h1:HWo1m869IqiPhD389kmkxeTalrjNbbJTC8LXupb+sl0= +github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8= +github.com/bio-routing/bio-rd v0.0.0-20190818170353-d73bc83147be h1:F6ARKJfXndR/aZBy9MY2AVfAZUgr2tsVBsiNpizlBBs= +github.com/bio-routing/bio-rd v0.0.0-20190818170353-d73bc83147be/go.mod h1:vcvXfobV53gqekz1AJI9J5FfzFRkgQYtLv4I/sszDNA= +github.com/bio-routing/tflow2 v0.0.0-20181230153523-2e308a4a3c3a/go.mod h1:tjzJ5IykdbWNs1FjmiJWsH6SRBl+aWgxO5I44DAegIw= +github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw= +github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= +github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= +github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as= +github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE= +github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk= +github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY= +github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b 
h1:VKtxabqXZkF25pY9ekfRL6a582T4P37/31XEstQ5p58= +github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q= +github.com/golang/lint v0.0.0-20180702182130-06c8688daad7/go.mod h1:tluoj9z5200jBnyusfRPU2LqT6J+DAorxEvtC7LHB+E= +github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/mock v1.2.0 h1:28o5sBqPkBsMGnC6b4MvE2TzSr5/AT4c/1fLqVGIwlk= +github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A= +github.com/golang/protobuf v1.2.0 h1:P3YflyNX/ehuJFLhxviNdFxQPkGK5cDcApsge1SqnvM= +github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/golang/protobuf v1.3.2 h1:6nsPYzhq5kReh6QImI3k5qWzO4PEbvbIW2cwSfR/6xs= +github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0 h1:Iju5GlWwrvL6UBg4zJJt3btmonfrMlCDdsejg4CZE7c= +github.com/grpc-ecosystem/go-grpc-middleware v1.0.0/go.mod h1:FiyG127CGDf3tlThmgyCl78X/SZQqEOJBCDaAfeWzPs= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0 h1:Ovs26xHkKqVztRpIrF/92BcuyuQ/YW4NSIpoGtfXNho= +github.com/grpc-ecosystem/go-grpc-prometheus v1.2.0/go.mod h1:8NvIoxWQoOIhqOTXgfV/d3M/q6VIi02HzZEHgUlZvzk= +github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU= +github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w= +github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck= +github.com/konsorten/go-windows-terminal-sequences v1.0.1 h1:mweAR1A6xJ3oS2pRaGiHgQ4OO8tzTaLawm8vnODuwDk= +github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ= +github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc= 
+github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU= +github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0= +github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= +github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0= +github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U= +github.com/pkg/errors v0.8.0 h1:WdK/asTD0HN+q6hsWO3/vpuAkAr+tw6aNJNDFFf0+qw= +github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pkg/errors v0.8.1 h1:iURUrRGxPUNPdy5/HRSm+Yj6okJ6UtLINN0Q9M4+h3I= +github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= +github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= +github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= +github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw= +github.com/prometheus/client_golang v1.0.0 h1:vrDKnkGzuGvhNAL56c7DBz29ZL+KxnoR0x7enabFceM= +github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo= +github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90 h1:S/YWwWx/RA8rT8tKFRuGUZhuA90OyIBpPCXkcbwU8DE= +github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA= +github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4= +github.com/prometheus/common v0.6.0/go.mod h1:eBmuwkDJBwy6iBfxCBob6t6dR6ENT/y+J+Zk0j9GMYc= +github.com/prometheus/common v0.7.0 h1:L+1lyG48J1zAQXA3RBX/nG/B3gjlHq0zTt2tlbJLyCY= 
+github.com/prometheus/common v0.7.0/go.mod h1:DjGbpBbp5NYNiECxcL/VnbXCCaQpKd3tt26CguLLsqA= +github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk= +github.com/prometheus/procfs v0.0.2 h1:6LJUbpNm42llc4HRCuvApCSWB/WfhuNo9K98Q9sNGfs= +github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA= +github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.3.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo= +github.com/sirupsen/logrus v1.4.2 h1:SPIRibHv4MatM3XXNO2BJeFLZwZ2LvZgfQ5+UNI2im4= +github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE= +github.com/soniah/gosnmp v0.0.0-20181018115632-28507a583d6f h1:phh4wMClwuuMjno/FI56YbYPA8cBxsE2BCfBOmevsMo= +github.com/soniah/gosnmp v0.0.0-20181018115632-28507a583d6f/go.mod h1:2Tv1OISIqbjlOCmGzXl+hlZSAHsftdCWHLaLEezhwV8= +github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= +github.com/stretchr/testify v1.2.2 h1:bSDNvY7ZPG5RlJ8otE/7V6gMiyenm9RtJ7IUVIAoJ1w= +github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs= +github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= +github.com/urfave/cli v1.21.0/go.mod h1:lxDj6qX9Q6lWQxIrbrT0nwecwUtRnhVZAJjJZrVUZZQ= +github.com/vishvananda/netlink v1.0.0/go.mod h1:+SR5DhBJrl6ZM7CoCKvpw5BKroDKQ+PJqOg65H/2ktk= +github.com/vishvananda/netns v0.0.0-20180720170159-13995c7128cc/go.mod h1:ZjcWmFBXmLKZu9Nxj3WKYEafiSqer2rnvPr0en9UNpI= +golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4= +golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= +golang.org/x/lint v0.0.0-20180702182130-06c8688daad7/go.mod 
h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE= +golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181106065722-10aee1819953/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3 h1:eH6Eip3UpmR+yM/qI9Ijluzb1bNv/cAU/n+6l8tRSis= +golang.org/x/net v0.0.0-20181220203305-927f97764cc3/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980 h1:dfGZHvZk057jK2MCeWus/TowKpJ8y4AmooUzdBSR9GU= +golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= +golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f h1:wMNYb4v58l5UBM7MYRLPG6ZhfOqbKu7X5eyFl8ZhKvA= +golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= +golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY= +golang.org/x/sys v0.0.0-20190422165155-953cdadca894 h1:Cz4ceDQGXuKRnVBDTS23GTn/pU5OE2C0WrNTOYK1Uuc= +golang.org/x/sys 
v0.0.0-20190422165155-953cdadca894/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs= +golang.org/x/text v0.3.0 h1:g61tztE5qeGQ89tm6NTjjM9VPIm088od1l6aSorWRWg= +golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= +golang.org/x/tools v0.0.0-20180828015842-6cd1fcedba52/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= +google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM= +google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc= +google.golang.org/genproto v0.0.0-20181221175505-bd9b4fb69e2f h1:eT3B0O2ghdSPzjAOznr3oOLyN1HFeYUncYl7FRwg4VI= +google.golang.org/genproto v0.0.0-20181221175505-bd9b4fb69e2f/go.mod h1:7Ep/1NZk928CDR8SjdVbjWNpdIf6nzjE3BTgJDr2Atg= +google.golang.org/grpc v1.16.0/go.mod h1:0JHn/cJsOMiMfNA9+DeHDlAU7KAAB5GDlYFpa9MZMio= +google.golang.org/grpc v1.17.0 h1:TRJYBgMclJvGYn2rIMjj+h9KtMt5r1Ij7ODVRIZkwhk= +google.golang.org/grpc v1.17.0/go.mod h1:6QZJwpn2B+Zp71q/5VxRsJ6NXXVCE5NRUHRo+f3cWCs= +gopkg.in/alecthomas/kingpin.v2 v2.2.6 h1:jMFz6MfLP0/4fUyZle81rXUoxOBFi19VUFKVDOQfozc= +gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405 h1:yhCVgyC4o1eVCa2tZl7eS0r+SDo693bJlVdllGtEeKM= +gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= +gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +gopkg.in/yaml.v2 v2.2.2 h1:ZCJp+EgiOT7lHqUV2J862kp8Qj64Jo6az82+3Td9dZw= +gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= +honnef.co/go/tools v0.0.0-20180728063816-88497007e858/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4= diff --git a/iana/protocols.go b/iana/protocols.go new file mode 100644 index 0000000..3cd8025 --- /dev/null +++ b/iana/protocols.go @@ -0,0 +1,174 @@ +package iana + +type IANA struct { + protocolsByID 
map[uint8]string + protocolsByName map[string]uint8 +} + +func New() *IANA { + iana := &IANA{ + protocolsByID: map[uint8]string{ + 0: "HOPOPT", + 1: "ICMP", + 2: "IGMP", + 3: "GGP", + 4: "IPv4", + 5: "ST", + 6: "TCP", + 7: "CBT", + 8: "EGP", + 9: "IGP", + 10: "BBN-RCC-MON", + 11: "NVP-II", + 12: "PUP", + 13: "ARGUS (deprecated)", + 14: "EMCON", + 15: "XNET", + 16: "CHAOS", + 17: "UDP", + 18: "MUX", + 19: "DCN-MEAS", + 20: "HMP", + 21: "PRM", + 22: "XNS-IDP", + 23: "TRUNK-1", + 24: "TRUNK-2", + 25: "LEAF-1", + 26: "LEAF-2", + 27: "RDP", + 28: "IRTP", + 29: "ISO-TP4", + 30: "NETBLT", + 31: "MFE-NSP", + 32: "MERIT-INP", + 33: "DCCP", + 34: "3PC", + 35: "IDPR", + 36: "XTP", + 37: "DDP", + 38: "IDPR-CMTP", + 39: "TP++", + 40: "IL", + 41: "IPv6", + 42: "SDRP", + 43: "IPv6-Route", + 44: "IPv6-Frag", + 45: "IDRP", + 46: "RSVP", + 47: "GRE", + 48: "DSR", + 49: "BNA", + 50: "ESP", + 51: "AH", + 52: "I-NLSP", + 53: "SWIPE (deprecated)", + 54: "NARP", + 55: "MOBILE", + 56: "TLSP", + 57: "SKIP", + 58: "IPv6-ICMP", + 59: "IPv6-NoNxt", + 60: "IPv6-Opts", + 61: "HOST-NETWORK", + 62: "CFTP", + 63: "LOCAL-NETWORK", + 64: "SAT-EXPAK", + 65: "KRYPTOLAN", + 66: "RVD", + 67: "IPPC", + 68: "DISTRIBUTED-FS", + 69: "SAT-MON", + 70: "VISA", + 71: "IPCV", + 72: "CPNX", + 73: "CPHB", + 74: "WSN", + 75: "PVP", + 76: "BR-SAT-MON", + 77: "SUN-ND", + 78: "WB-MON", + 79: "WB-EXPAK", + 80: "ISO-IP", + 81: "VMTP", + 82: "SECURE-VMTP", + 83: "VINES", + 84: "IPTM", + 85: "NSFNET-IGP", + 86: "DGP", + 87: "TCF", + 88: "EIGRP", + 89: "OSPFIGP", + 90: "Sprite-RPC", + 91: "LARP", + 92: "MTP", + 93: "AX.25", + 94: "IPIP", + 95: "MICP (deprecated)", + 96: "SCC-SP", + 97: "ETHERIP", + 98: "ENCAP", + 99: "PRIVATE-ENCTYPTION", + 100: "GMTP", + 101: "IFMP", + 102: "PNNI", + 103: "PIM", + 104: "ARIS", + 105: "SCPS", + 106: "QNX", + 107: "A/N", + 108: "IPComp", + 109: "SNP", + 110: "Compaq-Peer", + 111: "IPX-in-IP", + 112: "VRRP", + 113: "PGM", + 114: "ZEROHOP", + 115: "L2TP", + 116: "DDX", + 117: "IATP", + 118: 
"STP", + 119: "SRP", + 120: "UTI", + 121: "SMP", + 122: "SM (deprecated)", + 123: "PTP", + 124: "ISIS over IPv4", + 125: "FIRE", + 126: "CRTP", + 127: "CRUDP", + 128: "SSCOPMCE", + 129: "IPLT", + 130: "SPS", + 131: "PIPE", + 132: "SCTP", + 133: "FC", + 134: "RSVP-E2E-IGNORE", + 135: "Mobility Header", + 136: "UDPLite", + 137: "MPLS-in-IP", + 138: "manet", + 139: "HIP", + 140: "Shim6", + 141: "WESP", + 142: "ROHC", + 253: "EXPERIMENTAL-253", + 254: "EXPERIMENTAL-254", + 255: "Reserved", + }, + protocolsByName: make(map[string]uint8), + } + + for id, name := range iana.protocolsByID { + iana.protocolsByName[name] = id + } + + return iana +} + +func (iana *IANA) GetIPProtocolsByID() map[uint8]string { + return iana.protocolsByID +} + +func (iana *IANA) GetIPProtocolsByName() map[string]uint8 { + return iana.protocolsByName +} diff --git a/ifserver/ifserver.go b/ifserver/ifserver.go index 90bce21..5c9e56c 100644 --- a/ifserver/ifserver.go +++ b/ifserver/ifserver.go @@ -14,36 +14,42 @@ package ifserver import ( "fmt" + "io" "net" "strconv" "strings" + "sync" "sync/atomic" - "github.com/golang/glog" - "github.com/google/tflow2/convert" - "github.com/google/tflow2/ipfix" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/stats" + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/ipfix" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/srcache" + "github.com/bio-routing/tflow2/stats" + + log "github.com/sirupsen/logrus" ) // fieldMap describes what information is at what index in the slice // that we get from decoding a netflow packet type fieldMap struct { - srcAddr int - dstAddr int - protocol int - packets int - size int - intIn int - intOut int - nextHop int - family int - vlan int - ts int - srcAsn int - dstAsn int - srcPort int - dstPort int + srcAddr int + dstAddr int + protocol int + packets int + size int + intIn int + intOut int + nextHop int + family int + vlan 
int + ts int + srcAsn int + dstAsn int + srcPort int + dstPort int + samplingPacketInterval int } // IPFIXServer represents a Netflow Collector instance @@ -55,23 +61,26 @@ type IPFIXServer struct { // receiver is the channel used to receive flows from the annotator layer Output chan *netflow.Flow - // debug defines the debug level - debug int + // con is the UDP socket + conn *net.UDPConn + + wg sync.WaitGroup + + sampleRateCache *srcache.SamplerateCache - // bgpAugment is used to decide if ASN information from netflow packets should be used - bgpAugment bool + config *config.Config } -// New creates and starts a new `NetflowServer` instance -func New(listenAddr string, numReaders int, bgpAugment bool, debug int) *IPFIXServer { +// New creates and starts a new `IPFIXServer` instance +func New(numReaders int, config *config.Config, sampleRateCache *srcache.SamplerateCache) *IPFIXServer { ifs := &IPFIXServer{ - debug: debug, - tmplCache: newTemplateCache(), - Output: make(chan *netflow.Flow), - bgpAugment: bgpAugment, + tmplCache: newTemplateCache(), + Output: make(chan *netflow.Flow), + sampleRateCache: sampleRateCache, + config: config, } - addr, err := net.ResolveUDPAddr("udp", listenAddr) + addr, err := net.ResolveUDPAddr("udp", ifs.config.IPFIX.Listen) if err != nil { panic(fmt.Sprintf("ResolveUDPAddr: %v", err)) } @@ -83,6 +92,7 @@ func New(listenAddr string, numReaders int, bgpAugment bool, debug int) *IPFIXSe // Create goroutines that read netflow packet and process it for i := 0; i < numReaders; i++ { + ifs.wg.Add(numReaders) go func(num int) { ifs.packetWorker(num, con) }(i) @@ -91,26 +101,42 @@ func New(listenAddr string, numReaders int, bgpAugment bool, debug int) *IPFIXSe return ifs } +// Close closes the socket and stops the workers +func (ifs *IPFIXServer) Close() { + ifs.conn.Close() + ifs.wg.Wait() +} + +// validateSource checks if src is a configured agent +func (ifs *IPFIXServer) validateSource(src net.IP) bool { + if _, ok := 
ifs.config.AgentsNameByIP[src.String()]; ok { + return true + } + return false +} + // packetWorker reads netflow packet from socket and handsoff processing to processFlowSets() func (ifs *IPFIXServer) packetWorker(identity int, conn *net.UDPConn) { buffer := make([]byte, 8960) for { length, remote, err := conn.ReadFromUDP(buffer) + if err == io.EOF { + break + } if err != nil { - glog.Errorf("Error reading from socket: %v", err) + log.Errorf("Error reading from socket: %v", err) continue } atomic.AddUint64(&stats.GlobalStats.IPFIXpackets, 1) atomic.AddUint64(&stats.GlobalStats.IPFIXbytes, uint64(length)) - remote.IP = remote.IP.To4() - if remote.IP == nil { - glog.Errorf("Received IPv6 packet. Dropped.") - continue + if !ifs.validateSource(remote.IP) { + log.Errorf("Unknown source: %s", remote.IP.String()) } ifs.processPacket(remote.IP, buffer[:length]) } + ifs.wg.Done() } // processPacket takes a raw netflow packet, send it to the decoder, updates template cache @@ -119,7 +145,7 @@ func (ifs *IPFIXServer) processPacket(remote net.IP, buffer []byte) { length := len(buffer) packet, err := ipfix.Decode(buffer[:length], remote) if err != nil { - glog.Errorf("ipfix.Decode: %v", err) + log.Errorf("ipfix.Decode: %v", err) return } @@ -136,15 +162,15 @@ func (ifs *IPFIXServer) processFlowSets(remote net.IP, domainID uint32, flowSets if template == nil { templateKey := makeTemplateKey(addr, domainID, set.Header.SetID, keyParts) - if ifs.debug > 0 { - glog.Warningf("Template for given FlowSet not found: %s", templateKey) + if ifs.config.Debug > 0 { + log.Warningf("Template for given FlowSet not found: %s", templateKey) } continue } records := template.DecodeFlowSet(*set) if records == nil { - glog.Warning("Error decoding FlowSet") + log.Warning("Error decoding FlowSet") continue } ifs.processFlowSet(template, records, remote, ts, packet) @@ -156,36 +182,85 @@ func (ifs *IPFIXServer) processFlowSet(template *ipfix.TemplateRecords, records fm := generateFieldMap(template) 
for _, r := range records { - if fm.family == 4 { - atomic.AddUint64(&stats.GlobalStats.Flows4, 1) - } else if fm.family == 6 { - atomic.AddUint64(&stats.GlobalStats.Flows6, 1) - } else { - glog.Warning("Unknown address family") + /*if template.OptionScopes != nil { + if fm.samplingPacketInterval >= 0 { + ifs.sampleRateCache.Set(agent, uint64(convert.Uint32(r.Values[fm.samplingPacketInterval]))) + } continue + }*/ + + if fm.family >= 0 { + if fm.family == 4 { + atomic.AddUint64(&stats.GlobalStats.Flows4, 1) + } else if fm.family == 6 { + atomic.AddUint64(&stats.GlobalStats.Flows6, 1) + } else { + log.Warning("Unknown address family") + continue + } } var fl netflow.Flow fl.Router = agent fl.Timestamp = ts - fl.Family = uint32(fm.family) - fl.Packets = convert.Uint32(r.Values[fm.packets]) - fl.Size = uint64(convert.Uint32(r.Values[fm.size])) - fl.Protocol = convert.Uint32(r.Values[fm.protocol]) - fl.IntIn = convert.Uint32(r.Values[fm.intIn]) - fl.IntOut = convert.Uint32(r.Values[fm.intOut]) - fl.SrcPort = convert.Uint32(r.Values[fm.srcPort]) - fl.DstPort = convert.Uint32(r.Values[fm.dstPort]) - fl.SrcAddr = convert.Reverse(r.Values[fm.srcAddr]) - fl.DstAddr = convert.Reverse(r.Values[fm.dstAddr]) - fl.NextHop = convert.Reverse(r.Values[fm.nextHop]) - - if !ifs.bgpAugment { - fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn]) - fl.DstAs = convert.Uint32(r.Values[fm.dstAsn]) + + if fm.family >= 0 { + fl.Family = uint32(fm.family) + } + + if fm.packets >= 0 { + fl.Packets = convert.Uint32(r.Values[fm.packets]) + } + + if fm.size >= 0 { + fl.Size = uint64(convert.Uint32(r.Values[fm.size])) + } + + if fm.protocol >= 0 { + fl.Protocol = convert.Uint32(r.Values[fm.protocol]) + } + + if fm.intIn >= 0 { + fl.IntIn = convert.Uint32(r.Values[fm.intIn]) } - if ifs.debug > 2 { + if fm.intOut >= 0 { + fl.IntOut = convert.Uint32(r.Values[fm.intOut]) + } + + if fm.srcPort >= 0 { + fl.SrcPort = convert.Uint32(r.Values[fm.srcPort]) + } + + if fm.dstPort >= 0 { + fl.DstPort = 
convert.Uint32(r.Values[fm.dstPort]) + } + + if fm.srcAddr >= 0 { + fl.SrcAddr = convert.Reverse(r.Values[fm.srcAddr]) + } + + if fm.dstAddr >= 0 { + fl.DstAddr = convert.Reverse(r.Values[fm.dstAddr]) + } + + if fm.nextHop >= 0 { + fl.NextHop = convert.Reverse(r.Values[fm.nextHop]) + } + + if !ifs.config.BGPAugmentation.Enabled { + if fm.srcAsn >= 0 { + fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn]) + } + + if fm.dstAsn >= 0 { + fl.DstAs = convert.Uint32(r.Values[fm.dstAsn]) + } + } + + fl.Samplerate = ifs.sampleRateCache.Get(agent) + + if ifs.config.Debug > 2 { Dump(&fl) } @@ -221,7 +296,25 @@ func DumpTemplate(tmpl *ipfix.TemplateRecords) { // generateFieldMap processes a TemplateRecord and populates a fieldMap accordingly // the FieldMap can then be used to read fields from a flow func generateFieldMap(template *ipfix.TemplateRecords) *fieldMap { - var fm fieldMap + fm := fieldMap{ + srcAddr: -1, + dstAddr: -1, + protocol: -1, + packets: -1, + size: -1, + intIn: -1, + intOut: -1, + nextHop: -1, + family: -1, + vlan: -1, + ts: -1, + srcAsn: -1, + dstAsn: -1, + srcPort: -1, + dstPort: -1, + samplingPacketInterval: -1, + } + i := -1 for _, f := range template.Records { i++ @@ -259,8 +352,11 @@ func generateFieldMap(template *ipfix.TemplateRecords) *fieldMap { fm.srcAsn = i case ipfix.DstAs: fm.dstAsn = i + case ipfix.SamplingPacketInterval: + fm.samplingPacketInterval = i } } + return &fm } diff --git a/ifserver/template_cache.go b/ifserver/template_cache.go index 6f5fd0d..7360ccc 100644 --- a/ifserver/template_cache.go +++ b/ifserver/template_cache.go @@ -14,7 +14,7 @@ package ifserver import ( "sync" - "github.com/google/tflow2/ipfix" + "github.com/bio-routing/tflow2/ipfix" ) type templateCache struct { diff --git a/intfmapper/intfmapper.go b/intfmapper/intfmapper.go new file mode 100644 index 0000000..1d1c16d --- /dev/null +++ b/intfmapper/intfmapper.go @@ -0,0 +1,150 @@ +package intfmapper + +import ( + "strconv" + "strings" + "sync" + "time" + + 
"github.com/bio-routing/tflow2/config" + "github.com/pkg/errors" + log "github.com/sirupsen/logrus" + g "github.com/soniah/gosnmp" +) + +const ( + ifNameOID = "1.3.6.1.2.1.31.1.1.1.1" +) + +type IntfMapperInterface interface { + GetInterfaceIDByName(agent string) InterfaceIDByName + GetInterfaceNameByID(agent string) InterfaceNameByID +} + +// Mapper is a service that maps agents interface IDs to names +type Mapper struct { + agents []config.Agent + renewInterval int64 + timeout time.Duration + interfaceIDByNameByAgent map[string]InterfaceIDByName + interfaceNameByIDByAgent map[string]InterfaceNameByID + interfaceIDByNameByAgentMu sync.RWMutex + interfaceNameByIDByAgentMu sync.RWMutex +} + +// InterfaceIDByName maps interface names to IDs +type InterfaceIDByName map[string]uint16 + +// InterfaceNameByID maps IDs to interface names +type InterfaceNameByID map[uint16]string + +// New creates a new Mapper and starts workers for all agents that periodicly renew interface mappings +func New(agents []config.Agent, renewInterval int64, timeout time.Duration) (*Mapper, error) { + m := &Mapper{ + agents: agents, + renewInterval: renewInterval, + timeout: timeout, + interfaceIDByNameByAgent: make(map[string]InterfaceIDByName), + interfaceNameByIDByAgent: make(map[string]InterfaceNameByID), + } + + for _, agent := range m.agents { + m.interfaceIDByNameByAgent[agent.Name] = make(InterfaceIDByName) + if err := m.renewMapping(agent); err != nil { + return nil, errors.Wrapf(err, "Unable to get interface mapping for %s", agent.Name) + } + } + + m.startRenewWorkers() + + return m, nil +} + +func (m *Mapper) startRenewWorkers() { + for _, agent := range m.agents { + go func(agent config.Agent) { + for { + time.Sleep(time.Second * time.Duration(m.renewInterval)) + err := m.renewMapping(agent) + if err != nil { + log.Warningf("Unable to renew interface mapping for %s: %v", agent.Name, err) + } + } + }(agent) + } +} + +func (m *Mapper) renewMapping(a config.Agent) error { + var 
snmpClient *g.GoSNMP + tmp := *g.Default + snmpClient = &tmp + snmpClient.Target = a.IPAddress + snmpClient.Community = a.SNMPCommunity + snmpClient.Timeout = m.timeout + + if err := snmpClient.Connect(); err != nil { + return errors.Wrap(err, "SNMP client unable to connect") + } + defer snmpClient.Conn.Close() + + newMapByName := make(InterfaceIDByName) + err := snmpClient.BulkWalk(ifNameOID, newMapByName.update) + if err != nil { + return errors.Wrap(err, "walk error") + } + + newMapByID := make(InterfaceNameByID) + for name, id := range newMapByName { + newMapByID[id] = name + } + + m.interfaceIDByNameByAgentMu.Lock() + defer m.interfaceIDByNameByAgentMu.Unlock() + + m.interfaceIDByNameByAgent[a.Name] = newMapByName + m.interfaceNameByIDByAgent[a.Name] = newMapByID + + return nil +} + +func (im InterfaceIDByName) update(pdu g.SnmpPDU) error { + oid := strings.Split(pdu.Name, ".") + id, err := strconv.Atoi(oid[len(oid)-1]) + if err != nil { + return errors.Wrap(err, "Unable to convert interface id") + } + + if pdu.Type != g.OctetString { + return errors.Errorf("Unexpected PDU type: %d", pdu.Type) + } + + im[string(pdu.Value.([]byte))] = uint16(id) + + return nil +} + +// GetInterfaceIDByName gets the InterfaceIDByName +func (m *Mapper) GetInterfaceIDByName(agent string) InterfaceIDByName { + m.interfaceIDByNameByAgentMu.RLock() + defer m.interfaceIDByNameByAgentMu.RUnlock() + + ret := make(InterfaceIDByName) + for key, value := range m.interfaceIDByNameByAgent[agent] { + ret[key] = value + } + + return ret +} + +// GetInterfaceNameByID gets the InterfaceNameByID +func (m *Mapper) GetInterfaceNameByID(agent string) InterfaceNameByID { + m.interfaceNameByIDByAgentMu.RLock() + defer m.interfaceNameByIDByAgentMu.RUnlock() + + ret := make(InterfaceNameByID) + for key, value := range m.interfaceNameByIDByAgent[agent] { + ret[key] = value + } + + return ret +} diff --git a/ipfix/decode.go b/ipfix/decode.go index f2f21ab..34f3428 100644 --- a/ipfix/decode.go +++ 
b/ipfix/decode.go @@ -16,7 +16,8 @@ import ( "net" "unsafe" - "github.com/google/tflow2/convert" + "github.com/bio-routing/tflow2/convert" + "github.com/pkg/errors" ) const ( @@ -32,7 +33,7 @@ const TemplateSetID = 2 // errorIncompatibleVersion prints an error message in case the detected version is not supported func errorIncompatibleVersion(version uint16) error { - return fmt.Errorf("IPFIX: Incompatible protocol version v%d, only v10 is supported", version) + return errors.Errorf("IPFIX: Incompatible protocol version v%d, only v10 is supported", version) } // Decode is the main function of this package. It converts raw packet bytes to Packet struct. diff --git a/ipfix/field_db.go b/ipfix/field_db.go index d0bab7f..0ebac2a 100644 --- a/ipfix/field_db.go +++ b/ipfix/field_db.go @@ -108,4 +108,5 @@ const ( ApplicationDescription = 94 ApplicationTag = 95 ApplicationName = 96 + SamplingPacketInterval = 305 ) diff --git a/netflow/netflow.pb.go b/netflow/netflow.pb.go index 93bd004..d3737fe 100644 --- a/netflow/netflow.pb.go +++ b/netflow/netflow.pb.go @@ -11,6 +11,7 @@ It is generated from these files: It has these top-level messages: Pfx Flow + Intf Flows */ package netflow @@ -19,6 +20,11 @@ import proto "github.com/golang/protobuf/proto" import fmt "fmt" import math "math" +import ( + context "golang.org/x/net/context" + grpc "google.golang.org/grpc" +) + // Reference imports to suppress errors if they are not otherwise used. 
var _ = proto.Marshal var _ = fmt.Errorf @@ -95,6 +101,8 @@ type Flow struct { SrcPort uint32 `protobuf:"varint,17,opt,name=src_port,json=srcPort" json:"src_port,omitempty"` // DST port DstPort uint32 `protobuf:"varint,18,opt,name=dst_port,json=dstPort" json:"dst_port,omitempty"` + // Samplerate + Samplerate uint64 `protobuf:"varint,19,opt,name=samplerate" json:"samplerate,omitempty"` } func (m *Flow) Reset() { *m = Flow{} } @@ -228,16 +236,52 @@ func (m *Flow) GetDstPort() uint32 { return 0 } +func (m *Flow) GetSamplerate() uint64 { + if m != nil { + return m.Samplerate + } + return 0 +} + +// Intf groups an interfaces ID and name +type Intf struct { + // ID is an interface ID + Id uint32 `protobuf:"varint,1,opt,name=id" json:"id,omitempty"` + // name is an interfaces name + Name string `protobuf:"bytes,2,opt,name=name" json:"name,omitempty"` +} + +func (m *Intf) Reset() { *m = Intf{} } +func (m *Intf) String() string { return proto.CompactTextString(m) } +func (*Intf) ProtoMessage() {} +func (*Intf) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } + +func (m *Intf) GetId() uint32 { + if m != nil { + return m.Id + } + return 0 +} + +func (m *Intf) GetName() string { + if m != nil { + return m.Name + } + return "" +} + // Flows defines a groups of flows type Flows struct { // Group of flows Flows []*Flow `protobuf:"bytes,1,rep,name=flows" json:"flows,omitempty"` + // Mapping of interface names to IDs + InterfaceMapping []*Intf `protobuf:"bytes,2,rep,name=interface_mapping,json=interfaceMapping" json:"interface_mapping,omitempty"` } func (m *Flows) Reset() { *m = Flows{} } func (m *Flows) String() string { return proto.CompactTextString(m) } func (*Flows) ProtoMessage() {} -func (*Flows) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{2} } +func (*Flows) Descriptor() ([]byte, []int) { return fileDescriptor0, []int{3} } func (m *Flows) GetFlows() []*Flow { if m != nil { @@ -246,38 +290,124 @@ func (m *Flows) GetFlows() []*Flow { return 
nil } +func (m *Flows) GetInterfaceMapping() []*Intf { + if m != nil { + return m.InterfaceMapping + } + return nil +} + func init() { proto.RegisterType((*Pfx)(nil), "netflow.pfx") proto.RegisterType((*Flow)(nil), "netflow.Flow") + proto.RegisterType((*Intf)(nil), "netflow.Intf") proto.RegisterType((*Flows)(nil), "netflow.Flows") } +// Reference imports to suppress errors if they are not otherwise used. +var _ context.Context +var _ grpc.ClientConn + +// This is a compile-time assertion to ensure that this generated file +// is compatible with the grpc package it is being compiled against. +const _ = grpc.SupportPackageIsVersion4 + +// Client API for Annotator service + +type AnnotatorClient interface { + Annotate(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Flow, error) +} + +type annotatorClient struct { + cc *grpc.ClientConn +} + +func NewAnnotatorClient(cc *grpc.ClientConn) AnnotatorClient { + return &annotatorClient{cc} +} + +func (c *annotatorClient) Annotate(ctx context.Context, in *Flow, opts ...grpc.CallOption) (*Flow, error) { + out := new(Flow) + err := grpc.Invoke(ctx, "/netflow.annotator/Annotate", in, out, c.cc, opts...) 
+ if err != nil { + return nil, err + } + return out, nil +} + +// Server API for Annotator service + +type AnnotatorServer interface { + Annotate(context.Context, *Flow) (*Flow, error) +} + +func RegisterAnnotatorServer(s *grpc.Server, srv AnnotatorServer) { + s.RegisterService(&_Annotator_serviceDesc, srv) +} + +func _Annotator_Annotate_Handler(srv interface{}, ctx context.Context, dec func(interface{}) error, interceptor grpc.UnaryServerInterceptor) (interface{}, error) { + in := new(Flow) + if err := dec(in); err != nil { + return nil, err + } + if interceptor == nil { + return srv.(AnnotatorServer).Annotate(ctx, in) + } + info := &grpc.UnaryServerInfo{ + Server: srv, + FullMethod: "/netflow.annotator/Annotate", + } + handler := func(ctx context.Context, req interface{}) (interface{}, error) { + return srv.(AnnotatorServer).Annotate(ctx, req.(*Flow)) + } + return interceptor(ctx, in, info, handler) +} + +var _Annotator_serviceDesc = grpc.ServiceDesc{ + ServiceName: "netflow.annotator", + HandlerType: (*AnnotatorServer)(nil), + Methods: []grpc.MethodDesc{ + { + MethodName: "Annotate", + Handler: _Annotator_Annotate_Handler, + }, + }, + Streams: []grpc.StreamDesc{}, + Metadata: "netflow.proto", +} + func init() { proto.RegisterFile("netflow.proto", fileDescriptor0) } var fileDescriptor0 = []byte{ - // 383 bytes of a gzipped FileDescriptorProto - 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x09, 0x6e, 0x88, 0x02, 0xff, 0x6c, 0x51, 0x5d, 0xab, 0xd3, 0x40, - 0x10, 0x25, 0x4d, 0x9b, 0xb4, 0x93, 0xe6, 0xaa, 0x0b, 0xea, 0x28, 0x22, 0xa1, 0x22, 0x44, 0x90, - 0xfb, 0x70, 0xfd, 0x05, 0xf7, 0x45, 0xec, 0x93, 0x25, 0x7f, 0x20, 0xc4, 0x7c, 0x60, 0xb8, 0x49, - 0x76, 0xd9, 0x99, 0xd2, 0xe8, 0xbf, 0xf6, 0x1f, 0xc8, 0xec, 0xa6, 0xf5, 0xc5, 0xb7, 0x3d, 0xe7, - 0xcc, 0x9c, 0x3d, 0x33, 0x03, 0xe9, 0xd4, 0x72, 0x37, 0xe8, 0xcb, 0xbd, 0xb1, 0x9a, 0xb5, 0x8a, - 0x17, 0x78, 0xf8, 0x04, 0xa1, 0xe9, 0x66, 0x75, 0x07, 0xab, 0xe3, 0x09, 0x83, 0x2c, 0xc8, 0xf7, - 0xc5, 0xea, 0x78, 0x52, 0x0a, 0xd6, 0x63, 
0x45, 0x4f, 0xb8, 0x72, 0x8c, 0x7b, 0x1f, 0xfe, 0x84, - 0xb0, 0xfe, 0x3a, 0xe8, 0x8b, 0x7a, 0x05, 0x91, 0xd5, 0x67, 0x6e, 0xed, 0xd2, 0xb0, 0x20, 0xe1, - 0xbb, 0x6a, 0xec, 0x87, 0x5f, 0xae, 0x2d, 0x2d, 0x16, 0xa4, 0xde, 0xc0, 0x96, 0x6c, 0x5d, 0x56, - 0x4d, 0x63, 0x31, 0x74, 0x1d, 0x31, 0xd9, 0xfa, 0xb1, 0x69, 0xac, 0x48, 0x0d, 0xb1, 0x97, 0xd6, - 0x5e, 0x6a, 0x88, 0x9d, 0xf4, 0x16, 0xb6, 0x2e, 0x6b, 0xad, 0x07, 0xdc, 0x38, 0xbf, 0x1b, 0x56, - 0x08, 0xb1, 0xa9, 0xea, 0xa7, 0x96, 0x09, 0x23, 0x27, 0x5d, 0xa1, 0x04, 0xa7, 0xfe, 0x77, 0x8b, - 0x71, 0x16, 0xe4, 0xeb, 0xc2, 0xbd, 0xd5, 0x4b, 0x88, 0xfa, 0x89, 0xcb, 0x7e, 0xc2, 0xad, 0x2b, - 0xde, 0xf4, 0x13, 0x1f, 0x27, 0xf5, 0x1a, 0x62, 0xa1, 0xf5, 0x99, 0x71, 0xe7, 0xf3, 0xf6, 0x13, - 0x7f, 0x3f, 0xb3, 0x84, 0x9a, 0xda, 0x99, 0xcb, 0x9f, 0xda, 0x20, 0xf8, 0x50, 0x82, 0xbf, 0x69, - 0x23, 0x56, 0x6e, 0x14, 0xc2, 0xc4, 0x5b, 0xc9, 0x20, 0x24, 0xb4, 0x1b, 0x83, 0x70, 0xef, 0x69, - 0x19, 0x82, 0xd4, 0x7b, 0x48, 0xae, 0x46, 0xa2, 0xa5, 0x4e, 0xdb, 0x2d, 0x5e, 0x8f, 0xa4, 0xde, - 0xc1, 0x8e, 0xfb, 0xb1, 0x25, 0xae, 0x46, 0x83, 0x77, 0x59, 0x90, 0x87, 0xc5, 0x3f, 0x42, 0x7d, - 0x04, 0x59, 0x53, 0x69, 0xba, 0x19, 0x9f, 0x65, 0x41, 0x9e, 0x3c, 0xec, 0xef, 0x6f, 0x47, 0xec, - 0xe6, 0x42, 0x82, 0x9c, 0xba, 0x59, 0xca, 0xe4, 0x6f, 0x29, 0x7b, 0xfe, 0xbf, 0xb2, 0x86, 0x58, - 0xca, 0x96, 0x23, 0x18, 0x6d, 0x19, 0x5f, 0xf8, 0x9d, 0x89, 0x81, 0xb6, 0x7c, 0x3d, 0x82, 0x93, - 0x94, 0x97, 0xa4, 0x49, 0x5b, 0x3e, 0x7c, 0x86, 0x8d, 0x9c, 0x9c, 0xd4, 0x07, 0xd8, 0x88, 0x25, - 0x61, 0x90, 0x85, 0x79, 0xf2, 0x90, 0xde, 0xfe, 0x10, 0xb9, 0xf0, 0xda, 0x8f, 0xc8, 0x1d, 0xe8, - 0xcb, 0xdf, 0x00, 0x00, 0x00, 0xff, 0xff, 0x74, 0xa2, 0x1d, 0x5c, 0x6d, 0x02, 0x00, 0x00, + // 476 bytes of a gzipped FileDescriptorProto + 0x1f, 0x8b, 0x08, 0x00, 0x00, 0x00, 0x00, 0x00, 0x02, 0xff, 0x6c, 0x52, 0xcd, 0x8e, 0xd3, 0x30, + 0x10, 0x26, 0x6d, 0xfa, 0x37, 0xdd, 0x2e, 0xbb, 0x46, 0x80, 0x59, 0xa1, 0x55, 0x15, 0x84, 0x14, + 0xf6, 0xb0, 0x87, 0x72, 0x40, 0xe2, 
0xd6, 0x0b, 0xa2, 0x07, 0x44, 0x95, 0x17, 0x88, 0x4c, 0xe2, + 0xb0, 0xd6, 0x26, 0xb6, 0x65, 0x4f, 0xb5, 0x81, 0xb7, 0xe0, 0x8d, 0xd1, 0xd8, 0x69, 0x61, 0x11, + 0x37, 0x7f, 0x3f, 0x33, 0xfe, 0xec, 0x19, 0x58, 0x69, 0x89, 0x4d, 0x6b, 0x1e, 0x6e, 0xad, 0x33, + 0x68, 0xd8, 0x6c, 0x80, 0xd9, 0x3b, 0x18, 0xdb, 0xa6, 0x67, 0xe7, 0x30, 0xda, 0xed, 0x79, 0xb2, + 0x4e, 0xf2, 0xb3, 0x62, 0xb4, 0xdb, 0x33, 0x06, 0x69, 0x27, 0xfc, 0x3d, 0x1f, 0x05, 0x26, 0x9c, + 0xb3, 0x5f, 0x29, 0xa4, 0x9f, 0x5a, 0xf3, 0xc0, 0x5e, 0xc0, 0xd4, 0x99, 0x03, 0x4a, 0x37, 0x14, + 0x0c, 0x88, 0xf8, 0x46, 0x74, 0xaa, 0xfd, 0x11, 0xca, 0x56, 0xc5, 0x80, 0xd8, 0x2b, 0x98, 0x7b, + 0x57, 0x95, 0xa2, 0xae, 0x1d, 0x1f, 0x87, 0x8a, 0x99, 0x77, 0xd5, 0xb6, 0xae, 0x1d, 0x49, 0xb5, + 0xc7, 0x28, 0xa5, 0x51, 0xaa, 0x3d, 0x06, 0xe9, 0x0a, 0xe6, 0x21, 0x6b, 0x65, 0x5a, 0x3e, 0x09, + 0xfd, 0x4e, 0x98, 0x71, 0x98, 0x59, 0x51, 0xdd, 0x4b, 0xf4, 0x7c, 0x1a, 0xa4, 0x23, 0xa4, 0xe0, + 0x5e, 0xfd, 0x94, 0x7c, 0xb6, 0x4e, 0xf2, 0xb4, 0x08, 0x67, 0xf6, 0x1c, 0xa6, 0x4a, 0x63, 0xa9, + 0x34, 0x9f, 0x07, 0xf3, 0x44, 0x69, 0xdc, 0x69, 0xf6, 0x12, 0x66, 0x44, 0x9b, 0x03, 0xf2, 0x45, + 0xcc, 0xab, 0x34, 0x7e, 0x3d, 0x20, 0x85, 0xd2, 0xb2, 0xc7, 0xf2, 0xce, 0x58, 0x0e, 0x31, 0x14, + 0xe1, 0xcf, 0xc6, 0x52, 0xab, 0xf0, 0x14, 0xcf, 0x97, 0xb1, 0x15, 0x3d, 0xc4, 0x13, 0x1d, 0x9e, + 0xe1, 0xf9, 0x59, 0xa4, 0xe9, 0x11, 0x9e, 0x5d, 0xc3, 0xf2, 0xd8, 0x88, 0xb4, 0x55, 0xd0, 0x16, + 0x43, 0xaf, 0xad, 0x67, 0xaf, 0x61, 0x81, 0xaa, 0x93, 0x1e, 0x45, 0x67, 0xf9, 0xf9, 0x3a, 0xc9, + 0xc7, 0xc5, 0x1f, 0x82, 0xbd, 0x05, 0xfa, 0xa6, 0xd2, 0x36, 0x3d, 0x7f, 0xba, 0x4e, 0xf2, 0xe5, + 0xe6, 0xec, 0xf6, 0x34, 0xc4, 0xa6, 0x2f, 0x28, 0xc8, 0xbe, 0xe9, 0xc9, 0x46, 0x77, 0x93, 0xed, + 0xe2, 0x7f, 0xb6, 0xda, 0x23, 0xd9, 0x86, 0x21, 0x58, 0xe3, 0x90, 0x5f, 0xc6, 0x3f, 0xa3, 0x06, + 0xc6, 0xe1, 0x71, 0x08, 0x41, 0x62, 0x51, 0xa2, 0x22, 0x92, 0xae, 0x01, 0xbc, 0xe8, 0x6c, 0x2b, + 0x9d, 0x40, 0xc9, 0x9f, 0x85, 0x4f, 0xfd, 0x8b, 0xc9, 0x6e, 0x20, 0xdd, 
0x69, 0x6c, 0x68, 0x7f, + 0x54, 0x1d, 0xd6, 0x61, 0x55, 0x8c, 0x54, 0x4d, 0x63, 0xd0, 0xa2, 0x93, 0x61, 0x11, 0x16, 0x45, + 0x38, 0x67, 0x77, 0x30, 0xa1, 0xf5, 0xf1, 0xec, 0x0d, 0x4c, 0x28, 0x9e, 0xe7, 0xc9, 0x7a, 0x9c, + 0x2f, 0x37, 0xab, 0x53, 0x5e, 0x92, 0x8b, 0xa8, 0xb1, 0x8f, 0x70, 0xa9, 0x34, 0x4a, 0xd7, 0x88, + 0x4a, 0x96, 0x9d, 0xb0, 0x56, 0xe9, 0xef, 0x7c, 0xf4, 0x4f, 0x01, 0xdd, 0x5d, 0x5c, 0x9c, 0x7c, + 0x5f, 0xa2, 0x6d, 0xf3, 0x01, 0x16, 0x42, 0x6b, 0x83, 0x02, 0x8d, 0x63, 0x37, 0x30, 0xdf, 0x46, + 0x20, 0xd9, 0xe3, 0xab, 0xae, 0x1e, 0xc3, 0xec, 0xc9, 0xb7, 0x69, 0xd8, 0xb0, 0xf7, 0xbf, 0x03, + 0x00, 0x00, 0xff, 0xff, 0x2a, 0x39, 0x13, 0x10, 0x2e, 0x03, 0x00, 0x00, } diff --git a/netflow/netflow.proto b/netflow/netflow.proto index 62f9a72..32d2394 100644 --- a/netflow/netflow.proto +++ b/netflow/netflow.proto @@ -2,6 +2,10 @@ syntax = "proto3"; package netflow; +service annotator { + rpc Annotate (Flow) returns (Flow) {} +} + // Pfx defines an IP prefix message pfx { // IPv4 or IPv6 address @@ -65,10 +69,25 @@ message Flow { //DST port uint32 dst_port = 18; + + //Samplerate + uint64 samplerate = 19; +} + +// Intf groups an interfaces ID and name +message Intf { + // ID is an interface ID + uint32 id = 1; + + // name is an interfaces name + string name = 2; } // Flows defines a groups of flows message Flows { // Group of flows repeated Flow flows = 1; + + // Mapping of interface names to IDs + repeated Intf interface_mapping = 2; } \ No newline at end of file diff --git a/netflow/pfx.go b/netflow/pfx.go new file mode 100644 index 0000000..a636991 --- /dev/null +++ b/netflow/pfx.go @@ -0,0 +1,11 @@ +package netflow + +import "net" + +// ToIPNet returns the net.IPNet representation for the Prefix +func (pfx *Pfx) ToIPNet() *net.IPNet { + return &net.IPNet{ + IP: pfx.IP, + Mask: pfx.Mask, + } +} diff --git a/nf9/decode.go b/nf9/decode.go index 171342c..7b64241 100644 --- a/nf9/decode.go +++ b/nf9/decode.go @@ -16,7 +16,8 @@ import ( "net" "unsafe" - 
"github.com/google/tflow2/convert" + "github.com/bio-routing/tflow2/convert" + "github.com/pkg/errors" ) const ( @@ -30,9 +31,12 @@ const FlowSetIDTemplateMax = 255 // TemplateFlowSetID is the FlowSetID reserved for template flow sets const TemplateFlowSetID = 0 +// OptionTemplateFlowSetID is the FlowSetID reserved for option template flow sets +const OptionTemplateFlowSetID = 1 + // errorIncompatibleVersion prints an error message in case the detected version is not supported func errorIncompatibleVersion(version uint16) error { - return fmt.Errorf("NF9: Incompatible protocol version v%d, only v9 is supported", version) + return errors.Errorf("NF9: Incompatible protocol version v%d, only v9 is supported", version) } // Decode is the main function of this package. It converts raw packet bytes to Packet struct. @@ -40,11 +44,11 @@ func Decode(raw []byte, remote net.IP) (*Packet, error) { data := convert.Reverse(raw) //TODO: Make it endian aware. This assumes a little endian machine pSize := len(data) - bufSize := 1500 - buffer := [1500]byte{} + bufSize := 9216 + buffer := [9216]byte{} if pSize > bufSize { - panic("Buffer too small\n") + panic(fmt.Sprintf("Buffer too small (%d/%d)", pSize, bufSize)) } // copy data into array as arrays allow us to cast the shit out of it @@ -77,6 +81,9 @@ func Decode(raw []byte, remote net.IP) (*Packet, error) { if fls.Header.FlowSetID == TemplateFlowSetID { // Template decodeTemplate(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader, remote) + } else if fls.Header.FlowSetID == OptionTemplateFlowSetID { + // Option Template + decodeOption(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader, remote) } else if fls.Header.FlowSetID > FlowSetIDTemplateMax { // Actual data packet decodeData(&packet, ptr, uintptr(fls.Header.Length)-sizeOfFlowSetHeader) @@ -88,17 +95,39 @@ func Decode(raw []byte, remote net.IP) (*Packet, error) { return &packet, nil } -// decodeData decodes a flowSet from `packet` -func decodeData(packet 
*Packet, headerPtr unsafe.Pointer, size uintptr) { - flsh := (*FlowSetHeader)(unsafe.Pointer(headerPtr)) - data := unsafe.Pointer(uintptr(headerPtr) - uintptr(flsh.Length)) +// decodeOption decodes an option template from `packet` +func decodeOption(packet *Packet, end unsafe.Pointer, size uintptr, remote net.IP) { + min := uintptr(end) - size - fls := &FlowSet{ - Header: flsh, - Flows: (*(*[1<<31 - 1]byte)(data))[sizeOfFlowSetHeader:flsh.Length], - } + for uintptr(end) > min { + headerPtr := unsafe.Pointer(uintptr(end) - sizeOfOptionsTemplateRecordHeader) - packet.FlowSets = append(packet.FlowSets, fls) + tmplRecs := &TemplateRecords{} + hdr := (*OptionsTemplateRecordHeader)(unsafe.Pointer(headerPtr)) + tmplRecs.Header = &TemplateRecordHeader{TemplateID: hdr.TemplateID} + tmplRecs.Packet = packet + tmplRecs.Records = make([]*TemplateRecord, 0, numPreAllocRecs) + + ptr := headerPtr + // Process option scopes + for i := uint16(0); i < hdr.OptionScopeLength/uint16(sizeOfOptionScope); i++ { + optScope := (*OptionScope)(ptr) + tmplRecs.OptionScopes = append(tmplRecs.OptionScopes, optScope) + ptr = unsafe.Pointer(uintptr(ptr) - sizeOfOptionScope) + } + + // Process option fields + for i := uint16(0); i < hdr.OptionLength/uint16(sizeOfTemplateRecord); i++ { + opt := (*TemplateRecord)(ptr) + tmplRecs.Records = append(tmplRecs.Records, opt) + ptr = unsafe.Pointer(uintptr(ptr) - sizeOfTemplateRecord) + } + + //packet.OptionsTemplates = append(packet.OptionsTemplates, tmplRecs) + packet.Templates = append(packet.Templates, tmplRecs) + + end = unsafe.Pointer(uintptr(end) - uintptr(hdr.OptionScopeLength) - uintptr(hdr.OptionLength) - sizeOfOptionsTemplateRecordHeader) + } } // decodeTemplate decodes a template from `packet` @@ -125,6 +154,19 @@ func decodeTemplate(packet *Packet, end unsafe.Pointer, size uintptr, remote net } } +// decodeData decodes a flowSet from `packet` +func decodeData(packet *Packet, headerPtr unsafe.Pointer, size uintptr) { + flsh := 
(*FlowSetHeader)(unsafe.Pointer(headerPtr)) + data := unsafe.Pointer(uintptr(headerPtr) - uintptr(flsh.Length)) + + fls := &FlowSet{ + Header: flsh, + Flows: (*(*[1<<31 - 1]byte)(data))[sizeOfFlowSetHeader:flsh.Length], + } + + packet.FlowSets = append(packet.FlowSets, fls) +} + // PrintHeader prints the header of `packet` func PrintHeader(p *Packet) { fmt.Printf("Version: %d\n", p.Header.Version) diff --git a/nf9/decode_test.go b/nf9/decode_test.go index fcc7539..439f172 100644 --- a/nf9/decode_test.go +++ b/nf9/decode_test.go @@ -14,10 +14,13 @@ package nf9 import ( "net" "testing" + + "github.com/bio-routing/tflow2/convert" ) -func TestDecode(t *testing.T) { +/*func TestDecode(t *testing.T) { s := []byte{0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 249, 0, 6, 187, 71, 213, 103, 123, 68, 213, 103, 10, 5, 0, 0, 11, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 26, 187, 1, 239, 181, 153, 192, 66, 185, 34, 93, 13, 31, 65, 195, 66, 185, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 249, 0, 6, 183, 71, 213, 103, 7, 39, 213, 103, 224, 156, 0, 0, 153, 2, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 30, 80, 0, 105, 187, 153, 192, 66, 185, 136, 100, 80, 151, 65, 195, 66, 185, 128, 0, 221, 1, 0, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 6, 180, 71, 213, 103, 164, 62, 213, 103, 160, 0, 0, 0, 4, 0, 0, 0, 21, 0, 28, 0, 21, 0, 28, 0, 16, 80, 0, 87, 204, 185, 192, 66, 185, 147, 23, 217, 172, 93, 193, 66, 185, 64, 0, 223, 1, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 137, 0, 6, 191, 71, 213, 103, 248, 44, 213, 103, 125, 17, 0, 0, 57, 0, 0, 0, 21, 0, 72, 0, 21, 0, 72, 0, 24, 187, 1, 145, 226, 185, 192, 66, 185, 88, 160, 125, 74, 84, 193, 66, 185, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 124, 61, 213, 103, 164, 0, 0, 0, 3, 0, 0, 0, 39, 0, 22, 0, 39, 0, 22, 0, 19, 89, 216, 80, 0, 235, 5, 64, 100, 41, 193, 66, 185, 243, 121, 19, 50, 128, 0, 221, 1, 0, 0, 0, 
221, 134, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 185, 71, 213, 103, 234, 62, 213, 103, 201, 53, 0, 0, 177, 0, 0, 0, 21, 0, 73, 0, 21, 0, 73, 0, 24, 187, 1, 181, 211, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 0, 0, 0, 0, 0, 0, 35, 0, 14, 64, 80, 20, 0, 42, 179, 79, 172, 109, 9, 172, 109, 133, 55, 19, 15, 48, 96, 34, 3, 42, 104, 0, 222, 1, 0, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 226, 68, 213, 103, 201, 21, 0, 0, 18, 0, 0, 0, 116, 0, 22, 0, 116, 0, 22, 0, 26, 172, 230, 187, 1, 101, 0, 64, 100, 49, 193, 66, 185, 36, 107, 175, 54, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 194, 71, 213, 103, 222, 67, 213, 103, 211, 5, 0, 0, 6, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 27, 80, 0, 243, 165, 153, 192, 66, 185, 138, 98, 227, 172, 65, 195, 66, 185, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 188, 71, 213, 103, 188, 71, 213, 103, 122, 0, 0, 0, 1, 0, 0, 0, 184, 0, 15, 0, 184, 0, 15, 0, 24, 145, 193, 230, 15, 213, 1, 64, 100, 16, 193, 66, 185, 210, 7, 182, 193, 188, 0, 221, 1, 0, 0, 0, 221, 134, 212, 186, 30, 36, 78, 204, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 179, 71, 213, 103, 215, 49, 213, 103, 248, 17, 0, 0, 13, 0, 0, 0, 119, 0, 16, 0, 119, 0, 16, 0, 26, 2, 201, 187, 1, 220, 90, 4, 46, 254, 94, 0, 2, 0, 0, 0, 0, 0, 0, 128, 254, 34, 44, 143, 56, 96, 67, 7, 176, 0, 70, 21, 1, 96, 34, 3, 42, 142, 0, 0, 0, 12, 176, 206, 250, 14, 3, 19, 240, 128, 40, 3, 42, 104, 0, 222, 1, 0, 0, 0, 0, 8, 237, 240, 149, 1, 185, 92, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 183, 71, 213, 103, 47, 68, 213, 103, 54, 23, 0, 0, 10, 0, 0, 0, 73, 0, 22, 0, 73, 0, 22, 0, 26, 79, 154, 187, 1, 59, 4, 64, 100, 85, 193, 66, 185, 43, 156, 16, 199, 68, 0, 221, 1, 0, 0, 0, 221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 6, 179, 71, 213, 103, 179, 71, 213, 103, 61, 0, 0, 0, 1, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 16, 
80, 0, 251, 209, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 16, 32, 0, 0, 0, 0, 0, 0, 33, 8, 1, 64, 80, 20, 0, 42, 159, 9, 125, 55, 155, 45, 217, 165, 2, 0, 20, 1, 96, 34, 3, 42, 100, 0, 220, 1, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 1, 179, 71, 213, 103, 19, 59, 213, 103, 152, 0, 0, 0, 2, 0, 0, 0, 15, 0, 93, 0, 15, 0, 93, 0, 3, 3, 0, 0, 153, 192, 66, 185, 119, 160, 222, 68, 31, 194, 66, 185, 60, 0, 228, 1, 2, 0, 0, 1, 6, 0, 56, 0, 6, 0, 80, 0, 1, 0, 5, 0, 1, 0, 4, 0, 4, 0, 21, 0, 4, 0, 22, 0, 4, 0, 1, 0, 4, 0, 2, 0, 2, 0, 253, 0, 2, 0, 252, 0, 2, 0, 14, 0, 2, 0, 10, 0, 2, 0, 11, 0, 2, 0, 7, 0, 4, 0, 15, 0, 4, 0, 12, 0, 4, 0, 8, 0, 18, 0, 228, 1, 80, 0, 0, 0, 0, 0, 0, 0, 8, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 192, 71, 213, 103, 192, 71, 213, 103, 52, 0, 0, 0, 1, 0, 0, 0, 21, 0, 178, 0, 21, 0, 178, 0, 16, 187, 1, 62, 139, 185, 192, 66, 185, 168, 8, 125, 74, 54, 194, 66, 185, 68, 0, 221, 1, 0, 0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 17, 189, 71, 213, 103, 189, 71, 213, 103, 76, 0, 0, 0, 1, 0, 0, 0, 15, 0, 65, 0, 15, 0, 65, 0, 0, 123, 0, 234, 170, 153, 192, 66, 185, 221, 186, 9, 5, 65, 195, 66, 185, 64, 0, 223, 1, 0, 0, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 188, 71, 213, 103, 103, 71, 213, 103, 247, 0, 0, 0, 3, 0, 0, 0, 26, 0, 21, 0, 26, 0, 21, 0, 26, 46, 155, 80, 0, 81, 4, 64, 100, 102, 193, 66, 185, 46, 208, 58, 216, 0, 8, 201, 173, 78, 201, 2, 0, 229, 27, 75, 201, 2, 0, 0, 0, 0, 241, 0, 6, 179, 71, 213, 103, 101, 71, 213, 103, 247, 0, 0, 0, 3, 0, 0, 0, 26, 0, 21, 0, 26, 0, 21, 0, 26, 145, 155, 80, 0, 81, 4, 64, 100, 102, 193, 66, 185, 46, 208, 58, 216, 128, 0, 221, 1, 0, 0, 0, 221, 134, 100, 249, 80, 201, 2, 0, 228, 27, 75, 201, 2, 0, 0, 0, 0, 129, 0, 6, 180, 71, 213, 103, 134, 71, 213, 103, 38, 3, 0, 0, 2, 0, 0, 0, 21, 0, 34, 0, 21, 0, 34, 0, 24, 187, 1, 218, 156, 201, 173, 78, 254, 255, 201, 2, 2, 0, 0, 0, 0, 0, 0, 128, 254, 11, 0, 
0, 0, 0, 0, 0, 0, 78, 0, 1, 64, 80, 20, 0, 42, 35, 211, 203, 103, 92, 74, 192, 76, 7, 0, 20, 1, 96, 34, 3, 42, 104, 0, 222, 1, 0, 0, 0, 0, 167, 51, 204, 11, 128, 207, 118, 88, 75, 91, 213, 103, 19, 0, 9, 0} + s = convert.Reverse(s) packet, err := Decode(s, net.IP([]byte{1, 1, 1, 1})) if err != nil { @@ -29,6 +32,45 @@ func TestDecode(t *testing.T) { if !testEq(packet.FlowSets[0].Flows, flowSet) { t.Errorf("Decoded FlowSet is not the expected one. Got: %v, Expected: %v\n", packet.FlowSets[0].Flows, flowSet) } +}*/ + +func TestDecode2(t *testing.T) { + s := []byte{ + 8, 0, // Length + 44, 0, // Type + + 8, 0, // Length + 43, 0, // Type + + 8, 0, // Length + 42, 0, // Type + + 8, 0, // Length + 41, 0, // Type + + 4, 0, // Scope 1 Field Length + 1, 0, // Scope 1 Field Type = 1 = System + + 16, 0, // OptionLength + 4, 0, // OptionScopeLength + 10, 1, // TemplateID + + 30, 0, // Length + 1, 0, // FlowSetID + + 0, 0, 0, 0, //Source ID + 167, 51, 204, 11, // Sequence Number + 128, 207, 118, 88, // UNIX secs + 75, 91, 213, 103, // sysUpTime + 1, 0, // Count + 9, 0} // Version + s = convert.Reverse(s) + + _, err := Decode(s, net.IP([]byte{1, 1, 1, 1})) + if err != nil { + t.Errorf("Decoding packet failed: %v\n", err) + } + } func testEq(a, b []byte) bool { diff --git a/nf9/packet.go b/nf9/packet.go index bff5070..9a86c0c 100644 --- a/nf9/packet.go +++ b/nf9/packet.go @@ -206,7 +206,7 @@ type Packet struct { var sizeOfHeader = unsafe.Sizeof(Header{}) -// GetTemplateRecords generate a list of all Template Records in the packet. +// GetTemplateRecords returns a list of all Template Records in the packet. // Template Records can be used to decode Data FlowSets to Data Records. 
func (p *Packet) GetTemplateRecords() []*TemplateRecords { return p.Templates diff --git a/nf9/templates.go b/nf9/templates.go index b8328a6..6efd964 100644 --- a/nf9/templates.go +++ b/nf9/templates.go @@ -18,6 +18,12 @@ const ( numPreAllocFlowDataRecs = 20 ) +var ( + sizeOfTemplateRecordHeader = unsafe.Sizeof(TemplateRecordHeader{}) + sizeOfOptionsTemplateRecordHeader = unsafe.Sizeof(OptionsTemplateRecordHeader{}) + sizeOfOptionScope = unsafe.Sizeof(OptionScope{}) +) + // TemplateRecordHeader represents the header of a template record type TemplateRecordHeader struct { // Number of fields in this Template Record. Because a Template FlowSet @@ -33,13 +39,33 @@ type TemplateRecordHeader struct { TemplateID uint16 } -var sizeOfTemplateRecordHeader = unsafe.Sizeof(TemplateRecordHeader{}) +// OptionsTemplateRecordHeader represents the header of an option template record +type OptionsTemplateRecordHeader struct { + // The length (in bytes) of any options field definitions + // contained in this Options Template Record. + OptionLength uint16 + + // Number of fields in this Template Record. Because a Template FlowSet + // usually contains multiple Template Records, this field allows the + // Collector to determine the end of the current Template Record and + // the start of the next. + OptionScopeLength uint16 + + // Each of the newly generated Template Records is given a unique + // Template ID. This uniqueness is local to the Observation Domain that + // generated the Template ID. Template IDs of Data FlowSets are numbered + // from 256 to 65535. + TemplateID uint16 +} // TemplateRecords is a single template that describes structure of a Flow Record // (actual Netflow data). type TemplateRecords struct { Header *TemplateRecordHeader + // List of scopes + OptionScopes []*OptionScope + // List of fields in this Template Record. 
Records []*TemplateRecord @@ -48,6 +74,18 @@ type TemplateRecords struct { Values [][]byte } +// OptionScope represents an option scope in an options template flowset +type OptionScope struct { + // The length (in bytes) of the Scope field, as it would appear in + //an Options Data Record. + ScopeFieldLength uint16 + + //A numeric value that represents the type of field that would + //appear in the Options Template Record. Refer to the Field Type + //Definitions section. + ScopeFieldType uint16 +} + //TemplateRecord represents a Template Record as described in RFC3954 type TemplateRecord struct { // The length (in bytes) of the field. @@ -70,7 +108,7 @@ var sizeOfTemplateRecord = unsafe.Sizeof(TemplateRecord{}) // DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to // a list of Flow Data Records. -func (dtpl *TemplateRecords) DecodeFlowSet(set FlowSet) (list []FlowDataRecord) { +/*func (dtpl *TemplateRecords) DecodeFlowSet(set FlowSet) (list []FlowDataRecord) { if set.Header.FlowSetID != dtpl.Header.TemplateID { return nil } @@ -94,6 +132,32 @@ func (dtpl *TemplateRecords) DecodeFlowSet(set FlowSet) (list []FlowDataRecord) n = n - count } + return +}*/ + +// DecodeFlowSet uses current TemplateRecord to decode data in Data FlowSet to +// a list of Flow Data Records. +func DecodeFlowSet(templateRecords []*TemplateRecord, set FlowSet) (list []FlowDataRecord) { + var record FlowDataRecord + + // Pre-allocate some room for flows + list = make([]FlowDataRecord, 0, numPreAllocFlowDataRecs) + + // Assume total record length must be >= 4, otherwise it is impossible + // to distinguish between padding and new record. Padding MUST be + // supported. 
+ n := len(set.Flows) + count := 0 + + for n >= 4 { + record.Values, count = parseFieldValues(set.Flows[0:n], templateRecords) + if record.Values == nil { + return + } + list = append(list, record) + n = n - count + } + return } diff --git a/nfserver/nfserver.go b/nfserver/nfserver.go index da590c8..a848a32 100644 --- a/nfserver/nfserver.go +++ b/nfserver/nfserver.go @@ -14,36 +14,44 @@ package nfserver import ( "fmt" + "io" "net" "strconv" "strings" + "sync" "sync/atomic" - "github.com/golang/glog" - "github.com/google/tflow2/convert" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/nf9" - "github.com/google/tflow2/stats" + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/srcache" + + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/nf9" + "github.com/bio-routing/tflow2/stats" + log "github.com/sirupsen/logrus" ) // fieldMap describes what information is at what index in the slice // that we get from decoding a netflow packet type fieldMap struct { - srcAddr int - dstAddr int - protocol int - packets int - size int - intIn int - intOut int - nextHop int - family int - vlan int - ts int - srcAsn int - dstAsn int - srcPort int - dstPort int + srcAddr int + dstAddr int + protocol int + packets int + size int + intIn int + intOut int + nextHop int + family int + vlan int + ts int + srcAsn int + dstAsn int + srcPort int + dstPort int + flowSamplerID int + samplingInterval int + flowSamplerRandomInterval int } // NetflowServer represents a Netflow Collector instance @@ -55,62 +63,83 @@ type NetflowServer struct { // receiver is the channel used to receive flows from the annotator layer Output chan *netflow.Flow - // debug defines the debug level - debug int + // con is the UDP socket + conn *net.UDPConn + + wg sync.WaitGroup + + sampleRateCache *srcache.SamplerateCache - // bgpAugment is used to decide if ASN information from netflow packets should be used - 
bgpAugment bool + config *config.Config } // New creates and starts a new `NetflowServer` instance -func New(listenAddr string, numReaders int, bgpAugment bool, debug int) *NetflowServer { +func New(numReaders int, config *config.Config, sampleRateCache *srcache.SamplerateCache) *NetflowServer { nfs := &NetflowServer{ - debug: debug, - tmplCache: newTemplateCache(), - Output: make(chan *netflow.Flow), - bgpAugment: bgpAugment, + tmplCache: newTemplateCache(), + Output: make(chan *netflow.Flow), + sampleRateCache: sampleRateCache, + config: config, } - addr, err := net.ResolveUDPAddr("udp", listenAddr) + addr, err := net.ResolveUDPAddr("udp", nfs.config.NetflowV9.Listen) if err != nil { panic(fmt.Sprintf("ResolveUDPAddr: %v", err)) } - con, err := net.ListenUDP("udp", addr) + conn, err := net.ListenUDP("udp", addr) if err != nil { panic(fmt.Sprintf("Listen: %v", err)) } + nfs.conn = conn // Create goroutines that read netflow packet and process it + nfs.wg.Add(numReaders) for i := 0; i < numReaders; i++ { go func(num int) { - nfs.packetWorker(num, con) + nfs.packetWorker(num) }(i) } return nfs } +// Close closes the socket and stops the workers +func (nfs *NetflowServer) Close() { + nfs.conn.Close() + nfs.wg.Wait() +} + +// validateSource checks if src is a configured agent +func (nfs *NetflowServer) validateSource(src net.IP) bool { + if _, ok := nfs.config.AgentsNameByIP[src.String()]; ok { + return true + } + return false +} + // packetWorker reads netflow packet from socket and handsoff processing to processFlowSets() -func (nfs *NetflowServer) packetWorker(identity int, conn *net.UDPConn) { +func (nfs *NetflowServer) packetWorker(identity int) { buffer := make([]byte, 8960) for { - length, remote, err := conn.ReadFromUDP(buffer) + length, remote, err := nfs.conn.ReadFromUDP(buffer) + if err == io.EOF { + break + } if err != nil { - glog.Errorf("Error reading from socket: %v", err) + log.Errorf("Error reading from socket: %v", err) continue } 
atomic.AddUint64(&stats.GlobalStats.Netflow9packets, 1) atomic.AddUint64(&stats.GlobalStats.Netflow9bytes, uint64(length)) - remote.IP = remote.IP.To4() - if remote.IP == nil { - glog.Errorf("Received IPv6 packet. Dropped.") - continue + if !nfs.validateSource(remote.IP) { + log.Errorf("Unknown source: %s", remote.IP.String()) } nfs.processPacket(remote.IP, buffer[:length]) } + nfs.wg.Done() } // processPacket takes a raw netflow packet, send it to the decoder, updates template cache @@ -119,7 +148,7 @@ func (nfs *NetflowServer) processPacket(remote net.IP, buffer []byte) { length := len(buffer) packet, err := nf9.Decode(buffer[:length], remote) if err != nil { - glog.Errorf("nf9packet.Decode: %v", err) + log.Errorf("nf9packet.Decode: %v", err) return } @@ -136,15 +165,15 @@ func (nfs *NetflowServer) processFlowSets(remote net.IP, sourceID uint32, flowSe if template == nil { templateKey := makeTemplateKey(addr, sourceID, set.Header.FlowSetID, keyParts) - if nfs.debug > 0 { - glog.Warningf("Template for given FlowSet not found: %s", templateKey) + if nfs.config.Debug > 0 { + log.Warningf("Template for given FlowSet not found: %s", templateKey) } continue } - records := template.DecodeFlowSet(*set) + records := nf9.DecodeFlowSet(template.Records, *set) if records == nil { - glog.Warning("Error decoding FlowSet") + log.Warning("Error decoding FlowSet") continue } nfs.processFlowSet(template, records, remote, ts, packet) @@ -156,36 +185,90 @@ func (nfs *NetflowServer) processFlowSet(template *nf9.TemplateRecords, records fm := generateFieldMap(template) for _, r := range records { - if fm.family == 4 { - atomic.AddUint64(&stats.GlobalStats.Flows4, 1) - } else if fm.family == 6 { - atomic.AddUint64(&stats.GlobalStats.Flows6, 1) - } else { - glog.Warning("Unknown address family") + if template.OptionScopes != nil { + if fm.samplingInterval >= 0 { + nfs.sampleRateCache.Set(agent, uint64(convert.Uint32(r.Values[fm.samplingInterval]))) + } + + if 
fm.flowSamplerRandomInterval >= 0 { + nfs.sampleRateCache.Set(agent, uint64(convert.Uint32(r.Values[fm.flowSamplerRandomInterval]))) + } continue } + if fm.family >= 0 { + switch fm.family { + case 4: + atomic.AddUint64(&stats.GlobalStats.Flows4, 1) + case 6: + atomic.AddUint64(&stats.GlobalStats.Flows6, 1) + default: + log.Warning("Unknown address family") + continue + } + } + var fl netflow.Flow fl.Router = agent fl.Timestamp = ts - fl.Family = uint32(fm.family) - fl.Packets = convert.Uint32(r.Values[fm.packets]) - fl.Size = uint64(convert.Uint32(r.Values[fm.size])) - fl.Protocol = convert.Uint32(r.Values[fm.protocol]) - fl.IntIn = convert.Uint32(r.Values[fm.intIn]) - fl.IntOut = convert.Uint32(r.Values[fm.intOut]) - fl.SrcPort = convert.Uint32(r.Values[fm.srcPort]) - fl.DstPort = convert.Uint32(r.Values[fm.dstPort]) - fl.SrcAddr = convert.Reverse(r.Values[fm.srcAddr]) - fl.DstAddr = convert.Reverse(r.Values[fm.dstAddr]) - fl.NextHop = convert.Reverse(r.Values[fm.nextHop]) - - if !nfs.bgpAugment { - fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn]) - fl.DstAs = convert.Uint32(r.Values[fm.dstAsn]) + + if fm.family >= 0 { + fl.Family = uint32(fm.family) + } + + if fm.packets >= 0 { + fl.Packets = convert.Uint32(r.Values[fm.packets]) + } + + if fm.size >= 0 { + fl.Size = uint64(convert.Uint32(r.Values[fm.size])) + } + + if fm.protocol >= 0 { + fl.Protocol = convert.Uint32(r.Values[fm.protocol]) } - if nfs.debug > 2 { + if fm.intIn >= 0 { + fl.IntIn = convert.Uint32(r.Values[fm.intIn]) + } + + if fm.intOut >= 0 { + fl.IntOut = convert.Uint32(r.Values[fm.intOut]) + } + + if fm.srcPort >= 0 { + fl.SrcPort = convert.Uint32(r.Values[fm.srcPort]) + } + + if fm.dstPort >= 0 { + fl.DstPort = convert.Uint32(r.Values[fm.dstPort]) + } + + if fm.srcAddr >= 0 { + fl.SrcAddr = convert.Reverse(r.Values[fm.srcAddr]) + } + + if fm.dstAddr >= 0 { + fl.DstAddr = convert.Reverse(r.Values[fm.dstAddr]) + } + + if fm.nextHop >= 0 { + fl.NextHop = convert.Reverse(r.Values[fm.nextHop]) + } + + 
if !nfs.config.BGPAugmentation.Enabled { + if fm.srcAsn >= 0 { + fl.SrcAs = convert.Uint32(r.Values[fm.srcAsn]) + } + + if fm.dstAsn >= 0 { + fl.DstAs = convert.Uint32(r.Values[fm.dstAsn]) + } + } + + fl.Samplerate = nfs.sampleRateCache.Get(agent) + + if nfs.config.Debug > 2 { Dump(&fl) } @@ -221,7 +304,27 @@ func DumpTemplate(tmpl *nf9.TemplateRecords) { // generateFieldMap processes a TemplateRecord and populates a fieldMap accordingly // the FieldMap can then be used to read fields from a flow func generateFieldMap(template *nf9.TemplateRecords) *fieldMap { - var fm fieldMap + fm := fieldMap{ + srcAddr: -1, + dstAddr: -1, + protocol: -1, + packets: -1, + size: -1, + intIn: -1, + intOut: -1, + nextHop: -1, + family: -1, + vlan: -1, + ts: -1, + srcAsn: -1, + dstAsn: -1, + srcPort: -1, + dstPort: -1, + flowSamplerID: -1, + samplingInterval: -1, + flowSamplerRandomInterval: -1, + } + i := -1 for _, f := range template.Records { i++ @@ -259,6 +362,10 @@ func generateFieldMap(template *nf9.TemplateRecords) *fieldMap { fm.srcAsn = i case nf9.DstAs: fm.dstAsn = i + case nf9.SamplingInterval: + fm.samplingInterval = i + case nf9.FlowSamplerRandomInterval: + fm.flowSamplerRandomInterval = i } } return &fm diff --git a/nfserver/template_cache.go b/nfserver/template_cache.go index c8c9017..5bfca03 100644 --- a/nfserver/template_cache.go +++ b/nfserver/template_cache.go @@ -14,7 +14,7 @@ package nfserver import ( "sync" - "github.com/google/tflow2/nf9" + "github.com/bio-routing/tflow2/nf9" ) type templateCache struct { diff --git a/packet/dot1q.go b/packet/dot1q.go new file mode 100644 index 0000000..ba2c221 --- /dev/null +++ b/packet/dot1q.go @@ -0,0 +1,29 @@ +package packet + +import ( + "fmt" + "unsafe" +) + +var ( + // SizeOfDot1Q is the size of an Dot1Q header in bytes + SizeOfDot1Q = unsafe.Sizeof(Dot1Q{}) +) + +// Dot1Q represents an 802.1q header +type Dot1Q struct { + EtherType uint16 + TCI uint16 +} + +// DecodeDot1Q decodes an 802.1q header +func DecodeDot1Q(raw 
unsafe.Pointer, length uint32) (*Dot1Q, error) { + if SizeOfDot1Q > uintptr(length) { + return nil, fmt.Errorf("Frame is too short: %d", length) + } + + ptr := unsafe.Pointer(uintptr(raw) - SizeOfDot1Q) + dot1qHeader := (*Dot1Q)(ptr) + + return dot1qHeader, nil +} diff --git a/packet/ethernet.go b/packet/ethernet.go new file mode 100644 index 0000000..f05686d --- /dev/null +++ b/packet/ethernet.go @@ -0,0 +1,79 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package packet + +import ( + "github.com/pkg/errors" + "net" + "unsafe" + + "github.com/bio-routing/tflow2/convert" +) + +const ( + // EtherTypeARP is Address Resolution Protocol EtherType value + EtherTypeARP = 0x0806 + + // EtherTypeIPv4 is Internet Protocol version 4 EtherType value + EtherTypeIPv4 = 0x0800 + + // EtherTypeIPv6 is Internet Protocol Version 6 EtherType value + EtherTypeIPv6 = 0x86DD + + // EtherTypeLACP is Link Aggregation Control Protocol EtherType value + EtherTypeLACP = 0x8809 + + // EtherTypeIEEE8021Q is VLAN-tagged frame (IEEE 802.1Q) EtherType value + EtherTypeIEEE8021Q = 0x8100 +) + +var ( + // SizeOfEthernetII is the size of an EthernetII header in bytes + SizeOfEthernetII = unsafe.Sizeof(ethernetII{}) +) + +// EthernetHeader represents layer two IEEE 802.11 +type EthernetHeader struct { + SrcMAC net.HardwareAddr + DstMAC net.HardwareAddr + EtherType uint16 +} + +type ethernetII struct { + EtherType uint16 + SrcMAC [6]byte + DstMAC [6]byte +} + +// DecodeEthernet decodes an EthernetII header +func DecodeEthernet(raw unsafe.Pointer, length uint32) (*EthernetHeader, error) { + if SizeOfEthernetII > uintptr(length) { + return nil, errors.Errorf("Frame is too short: %d", length) + } + + ptr := unsafe.Pointer(uintptr(raw) - SizeOfEthernetII) + ethHeader := (*ethernetII)(ptr) + + srcMAC := ethHeader.SrcMAC[:] + dstMAC := ethHeader.DstMAC[:] + + srcMAC = convert.Reverse(srcMAC) + dstMAC = convert.Reverse(dstMAC) + + h := &EthernetHeader{ + SrcMAC: net.HardwareAddr(srcMAC), + DstMAC: net.HardwareAddr(dstMAC), + EtherType: ethHeader.EtherType, + } + + return h, nil +} diff --git a/packet/ethernet_test.go b/packet/ethernet_test.go new file mode 100644 index 0000000..3ea83c9 --- /dev/null +++ b/packet/ethernet_test.go @@ -0,0 +1,74 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package packet + +import ( + "testing" + "unsafe" +) + +func TestDecode(t *testing.T) { + data := []byte{ + 128, // Header Length + 92, 180, 133, 203, // ACK Number + 31, 4, 191, 24, // Sequence Number + 222, 148, // DST port + 80, 0, // SRC port + + 19, 131, 191, 87, // DST IP + 238, 153, 37, 185, // SRC IP + 186, 25, // Header Checksum + 6, // Protocol + 62, // TTL + 0, 64, // Flags + Fragment offset + 131, 239, // Identifier + 212, 5, // Total Length + 0, // TOS + 69, // Version + Length + + 0, 8, // EtherType + 185, 28, 4, 113, 78, 32, // Source MAC + 148, 2, 127, 31, 113, 128, // Destination MAC + } + + pSize := len(data) + bufSize := 128 + buffer := [128]byte{} + + if pSize > bufSize { + panic("Buffer too small\n") + } + + // copy data into array as arrays allow us to cast the shit out of it + for i := 0; i < pSize; i++ { + buffer[bufSize-pSize+i] = data[i] + } + + bufferPtr := unsafe.Pointer(&buffer) + headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize)) + + etherHeader, err := DecodeEthernet(headerPtr, 128) + if err != nil { + t.Errorf("Decoding packet failed: %v\n", err) + } + + if etherHeader.DstMAC.String() != "80:71:1f:7f:02:94" { + t.Errorf("Unexpected DST MAC address. Expected %s. Got %s", "80:71:1f:7f:02:94", etherHeader.DstMAC.String()) + } + + if etherHeader.SrcMAC.String() != "20:4e:71:04:1c:b9" { + t.Errorf("Unexpected SRC MAC address. Expected %s. Got %s", "20:4e:71:04:1c:b9", etherHeader.SrcMAC.String()) + } + + if etherHeader.EtherType != EtherTypeIPv4 { + t.Errorf("Unexpected ethertype. Expected %d. 
Got %d", EtherTypeIPv4, etherHeader.EtherType) + } +} diff --git a/packet/ipv4.go b/packet/ipv4.go new file mode 100644 index 0000000..8169895 --- /dev/null +++ b/packet/ipv4.go @@ -0,0 +1,42 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package packet + +import ( + "github.com/pkg/errors" + "unsafe" +) + +var ( + SizeOfIPv4Header = unsafe.Sizeof(IPv4Header{}) +) + +type IPv4Header struct { + DstAddr [4]byte + SrcAddr [4]byte + HeaderChecksum uint16 + Protocol uint8 + TTL uint8 + FlagsFragmentOffset uint16 + Identification uint16 + TotalLength uint16 + DSCP uint8 + VersionHeaderLength uint8 +} + +func DecodeIPv4(raw unsafe.Pointer, length uint32) (*IPv4Header, error) { + if SizeOfIPv4Header > uintptr(length) { + return nil, errors.Errorf("Frame is too short: %d", length) + } + + return (*IPv4Header)(unsafe.Pointer(uintptr(raw) - SizeOfIPv4Header)), nil +} diff --git a/packet/ipv6.go b/packet/ipv6.go new file mode 100644 index 0000000..a4d5adf --- /dev/null +++ b/packet/ipv6.go @@ -0,0 +1,38 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package packet + +import ( + "github.com/pkg/errors" + "unsafe" +) + +var ( + SizeOfIPv6Header = unsafe.Sizeof(IPv6Header{}) +) + +type IPv6Header struct { + DstAddr [16]byte + SrcAddr [16]byte + HopLimit uint8 + NextHeader uint8 + PayloadLength uint16 + VersionTrafficClassFlowLabel uint32 +} + +func DecodeIPv6(raw unsafe.Pointer, length uint32) (*IPv6Header, error) { + if SizeOfIPv6Header > uintptr(length) { + return nil, errors.Errorf("Frame is too short: %d", length) + } + + return (*IPv6Header)(unsafe.Pointer(uintptr(raw) - SizeOfIPv6Header)), nil +} diff --git a/packet/tcp.go b/packet/tcp.go new file mode 100644 index 0000000..c8f7fda --- /dev/null +++ b/packet/tcp.go @@ -0,0 +1,46 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package packet + +import ( + "unsafe" + + "github.com/pkg/errors" +) + +const ( + TCP = 6 +) + +var ( + SizeOfTCPHeader = unsafe.Sizeof(TCPHeader{}) +) + +type TCPHeader struct { + UrgentPointer uint16 + Checksum uint16 + Window uint16 + Flags uint8 + DataOffset uint8 + ACKNumber uint32 + SequenceNumber uint32 + DstPort uint16 + SrcPort uint16 +} + +func DecodeTCP(raw unsafe.Pointer, length uint32) (*TCPHeader, error) { + if SizeOfTCPHeader > uintptr(length) { + return nil, errors.Errorf("Frame is too short: %d", length) + } + + return (*TCPHeader)(unsafe.Pointer(uintptr(raw) - SizeOfTCPHeader)), nil +} diff --git a/packet/udp.go b/packet/udp.go new file mode 100644 index 0000000..ec87a35 --- /dev/null +++ b/packet/udp.go @@ -0,0 +1,45 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +package packet + +import ( + "unsafe" + + "github.com/pkg/errors" +) + +const ( + // UDP IP protocol number + UDP = 17 +) + +var ( + // SizeOfUDPHeader is the size of a UDP header in bytes + SizeOfUDPHeader = unsafe.Sizeof(UDPHeader{}) +) + +// UDPHeader represents a UDP header +type UDPHeader struct { + Checksum uint16 + Length uint16 + DstPort uint16 + SrcPort uint16 +} + +// DecodeUDP decodes a UDP header +func DecodeUDP(raw unsafe.Pointer, length uint32) (*UDPHeader, error) { + if SizeOfTCPHeader > uintptr(length) { + return nil, errors.Errorf("Frame is too short: %d", length) + } + + return (*UDPHeader)(unsafe.Pointer(uintptr(raw) - SizeOfUDPHeader)), nil +} diff --git a/protocol_numbers.csv b/protocol_numbers.csv deleted file mode 100644 index 21aba4e..0000000 --- a/protocol_numbers.csv +++ /dev/null @@ -1,173 +0,0 @@ -Decimal,Keyword,Protocol,IPv6 Extension Header,Reference -0,HOPOPT,IPv6 Hop-by-Hop Option,Y,[RFC2460] -1,ICMP,Internet Control Message,,[RFC792] -2,IGMP,Internet Group Management,,[RFC1112] -3,GGP,Gateway-to-Gateway,,[RFC823] -4,IPv4,IPv4 encapsulation,,[RFC2003] -5,ST,Stream,,[RFC1190][RFC1819] -6,TCP,Transmission Control,,[RFC793] -7,CBT,CBT,,[Tony_Ballardie] -8,EGP,Exterior Gateway Protocol,,[RFC888][David_Mills] -9,IGP,"any private interior gateway -(used by Cisco for their IGRP)",,[Internet_Assigned_Numbers_Authority] -10,BBN-RCC-MON,BBN RCC Monitoring,,[Steve_Chipman] -11,NVP-II,Network Voice Protocol,,[RFC741][Steve_Casner] -12,PUP,PUP,,"[Boggs, D., J. Shoch, E. Taft, and R. 
Metcalfe, ""PUP: An -Internetwork Architecture"", XEROX Palo Alto Research Center, -CSL-79-10, July 1979; also in IEEE Transactions on -Communication, Volume COM-28, Number 4, April 1980.][[XEROX]]" -13,ARGUS (deprecated),ARGUS,,[Robert_W_Scheifler] -14,EMCON,EMCON,,[] -15,XNET,Cross Net Debugger,,"[Haverty, J., ""XNET Formats for Internet Protocol Version 4"", -IEN 158, October 1980.][Jack_Haverty]" -16,CHAOS,Chaos,,[J_Noel_Chiappa] -17,UDP,User Datagram,,[RFC768][Jon_Postel] -18,MUX,Multiplexing,,"[Cohen, D. and J. Postel, ""Multiplexing Protocol"", IEN 90, -USC/Information Sciences Institute, May 1979.][Jon_Postel]" -19,DCN-MEAS,DCN Measurement Subsystems,,[David_Mills] -20,HMP,Host Monitoring,,[RFC869][Bob_Hinden] -21,PRM,Packet Radio Measurement,,[Zaw_Sing_Su] -22,XNS-IDP,XEROX NS IDP,,"[""The Ethernet, A Local Area Network: Data Link Layer and -Physical Layer Specification"", AA-K759B-TK, Digital -Equipment Corporation, Maynard, MA. Also as: ""The -Ethernet - A Local Area Network"", Version 1.0, Digital -Equipment Corporation, Intel Corporation, Xerox -Corporation, September 1980. And: ""The Ethernet, A Local -Area Network: Data Link Layer and Physical Layer -Specifications"", Digital, Intel and Xerox, November 1982. 
-And: XEROX, ""The Ethernet, A Local Area Network: Data Link -Layer and Physical Layer Specification"", X3T51/80-50, -Xerox Corporation, Stamford, CT., October 1980.][[XEROX]]" -23,TRUNK-1,Trunk-1,,[Barry_Boehm] -24,TRUNK-2,Trunk-2,,[Barry_Boehm] -25,LEAF-1,Leaf-1,,[Barry_Boehm] -26,LEAF-2,Leaf-2,,[Barry_Boehm] -27,RDP,Reliable Data Protocol,,[RFC908][Bob_Hinden] -28,IRTP,Internet Reliable Transaction,,[RFC938][Trudy_Miller] -29,ISO-TP4,ISO Transport Protocol Class 4,,[RFC905][] -30,NETBLT,Bulk Data Transfer Protocol,,[RFC969][David_Clark] -31,MFE-NSP,MFE Network Services Protocol,,"[Shuttleworth, B., ""A Documentary of MFENet, a National -Computer Network"", UCRL-52317, Lawrence Livermore Labs, -Livermore, California, June 1977.][Barry_Howard]" -32,MERIT-INP,MERIT Internodal Protocol,,[Hans_Werner_Braun] -33,DCCP,Datagram Congestion Control Protocol,,[RFC4340] -34,3PC,Third Party Connect Protocol,,[Stuart_A_Friedberg] -35,IDPR,Inter-Domain Policy Routing Protocol,,[Martha_Steenstrup] -36,XTP,XTP,,[Greg_Chesson] -37,DDP,Datagram Delivery Protocol,,[Wesley_Craig] -38,IDPR-CMTP,IDPR Control Message Transport Proto,,[Martha_Steenstrup] -39,TP++,TP++ Transport Protocol,,[Dirk_Fromhein] -40,IL,IL Transport Protocol,,[Dave_Presotto] -41,IPv6,IPv6 encapsulation,,[RFC2473] -42,SDRP,Source Demand Routing Protocol,,[Deborah_Estrin] -43,IPv6-Route,Routing Header for IPv6,Y,[Steve_Deering] -44,IPv6-Frag,Fragment Header for IPv6,Y,[Steve_Deering] -45,IDRP,Inter-Domain Routing Protocol,,[Sue_Hares] -46,RSVP,Reservation Protocol,,[RFC2205][RFC3209][Bob_Braden] -47,GRE,Generic Routing Encapsulation,,[RFC2784][Tony_Li] -48,DSR,Dynamic Source Routing Protocol,,[RFC4728] -49,BNA,BNA,,[Gary Salamon] -50,ESP,Encap Security Payload,Y,[RFC4303] -51,AH,Authentication Header,Y,[RFC4302] -52,I-NLSP,Integrated Net Layer Security TUBA,,[K_Robert_Glenn] -53,SWIPE (deprecated),IP with Encryption,,[John_Ioannidis] -54,NARP,NBMA Address Resolution Protocol,,[RFC1735] -55,MOBILE,IP 
Mobility,,[Charlie_Perkins] -56,TLSP,"Transport Layer Security Protocol -using Kryptonet key management",,[Christer_Oberg] -57,SKIP,SKIP,,[Tom_Markson] -58,IPv6-ICMP,ICMP for IPv6,,[RFC2460] -59,IPv6-NoNxt,No Next Header for IPv6,,[RFC2460] -60,IPv6-Opts,Destination Options for IPv6,Y,[RFC2460] -61,,any host internal protocol,,[Internet_Assigned_Numbers_Authority] -62,CFTP,CFTP,,"[Forsdick, H., ""CFTP"", Network Message, Bolt Beranek and -Newman, January 1982.][Harry_Forsdick]" -63,,any local network,,[Internet_Assigned_Numbers_Authority] -64,SAT-EXPAK,SATNET and Backroom EXPAK,,[Steven_Blumenthal] -65,KRYPTOLAN,Kryptolan,,[Paul Liu] -66,RVD,MIT Remote Virtual Disk Protocol,,[Michael_Greenwald] -67,IPPC,Internet Pluribus Packet Core,,[Steven_Blumenthal] -68,,any distributed file system,,[Internet_Assigned_Numbers_Authority] -69,SAT-MON,SATNET Monitoring,,[Steven_Blumenthal] -70,VISA,VISA Protocol,,[Gene_Tsudik] -71,IPCV,Internet Packet Core Utility,,[Steven_Blumenthal] -72,CPNX,Computer Protocol Network Executive,,[David Mittnacht] -73,CPHB,Computer Protocol Heart Beat,,[David Mittnacht] -74,WSN,Wang Span Network,,[Victor Dafoulas] -75,PVP,Packet Video Protocol,,[Steve_Casner] -76,BR-SAT-MON,Backroom SATNET Monitoring,,[Steven_Blumenthal] -77,SUN-ND,SUN ND PROTOCOL-Temporary,,[William_Melohn] -78,WB-MON,WIDEBAND Monitoring,,[Steven_Blumenthal] -79,WB-EXPAK,WIDEBAND EXPAK,,[Steven_Blumenthal] -80,ISO-IP,ISO Internet Protocol,,[Marshall_T_Rose] -81,VMTP,VMTP,,[Dave_Cheriton] -82,SECURE-VMTP,SECURE-VMTP,,[Dave_Cheriton] -83,VINES,VINES,,[Brian Horn] -84,TTP,Transaction Transport Protocol,,[Jim_Stevens] -84,IPTM,Internet Protocol Traffic Manager,,[Jim_Stevens] -85,NSFNET-IGP,NSFNET-IGP,,[Hans_Werner_Braun] -86,DGP,Dissimilar Gateway Protocol,,"[M/A-COM Government Systems, ""Dissimilar Gateway Protocol -Specification, Draft Version"", Contract no. 
CS901145, -November 16, 1987.][Mike_Little]" -87,TCF,TCF,,[Guillermo_A_Loyola] -88,EIGRP,EIGRP,,[RFC7868] -89,OSPFIGP,OSPFIGP,,[RFC1583][RFC2328][RFC5340][John_Moy] -90,Sprite-RPC,Sprite RPC Protocol,,"[Welch, B., ""The Sprite Remote Procedure Call System"", -Technical Report, UCB/Computer Science Dept., 86/302, -University of California at Berkeley, June 1986.][Bruce Willins]" -91,LARP,Locus Address Resolution Protocol,,[Brian Horn] -92,MTP,Multicast Transport Protocol,,[Susie_Armstrong] -93,AX.25,AX.25 Frames,,[Brian_Kantor] -94,IPIP,IP-within-IP Encapsulation Protocol,,[John_Ioannidis] -95,MICP (deprecated),Mobile Internetworking Control Pro.,,[John_Ioannidis] -96,SCC-SP,Semaphore Communications Sec. Pro.,,[Howard_Hart] -97,ETHERIP,Ethernet-within-IP Encapsulation,,[RFC3378] -98,ENCAP,Encapsulation Header,,[RFC1241][Robert_Woodburn] -99,,any private encryption scheme,,[Internet_Assigned_Numbers_Authority] -100,GMTP,GMTP,,[[RXB5]] -101,IFMP,Ipsilon Flow Management Protocol,,"[Bob_Hinden][November 1995, 1997.]" -102,PNNI,PNNI over IP,,[Ross_Callon] -103,PIM,Protocol Independent Multicast,,[RFC7761][Dino_Farinacci] -104,ARIS,ARIS,,[Nancy_Feldman] -105,SCPS,SCPS,,[Robert_Durst] -106,QNX,QNX,,[Michael_Hunter] -107,A/N,Active Networks,,[Bob_Braden] -108,IPComp,IP Payload Compression Protocol,,[RFC2393] -109,SNP,Sitara Networks Protocol,,[Manickam_R_Sridhar] -110,Compaq-Peer,Compaq Peer Protocol,,[Victor_Volpe] -111,IPX-in-IP,IPX in IP,,[CJ_Lee] -112,VRRP,Virtual Router Redundancy Protocol,,[RFC5798] -113,PGM,PGM Reliable Transport Protocol,,[Tony_Speakman] -114,,any 0-hop protocol,,[Internet_Assigned_Numbers_Authority] -115,L2TP,Layer Two Tunneling Protocol,,[RFC3931][Bernard_Aboba] -116,DDX,D-II Data Exchange (DDX),,[John_Worley] -117,IATP,Interactive Agent Transfer Protocol,,[John_Murphy] -118,STP,Schedule Transfer Protocol,,[Jean_Michel_Pittet] -119,SRP,SpectraLink Radio Protocol,,[Mark_Hamilton] -120,UTI,UTI,,[Peter_Lothberg] -121,SMP,Simple Message 
Protocol,,[Leif_Ekblad] -122,SM (deprecated),Simple Multicast Protocol,,[Jon_Crowcroft][draft-perlman-simple-multicast] -123,PTP,Performance Transparency Protocol,,[Michael_Welzl] -124,ISIS over IPv4,,,[Tony_Przygienda] -125,FIRE,,,[Criag_Partridge] -126,CRTP,Combat Radio Transport Protocol,,[Robert_Sautter] -127,CRUDP,Combat Radio User Datagram,,[Robert_Sautter] -128,SSCOPMCE,,,[Kurt_Waber] -129,IPLT,,,[[Hollbach]] -130,SPS,Secure Packet Shield,,[Bill_McIntosh] -131,PIPE,Private IP Encapsulation within IP,,[Bernhard_Petri] -132,SCTP,Stream Control Transmission Protocol,,[Randall_R_Stewart] -133,FC,Fibre Channel,,[Murali_Rajagopal][RFC6172] -134,RSVP-E2E-IGNORE,,,[RFC3175] -135,Mobility Header,,Y,[RFC6275] -136,UDPLite,,,[RFC3828] -137,MPLS-in-IP,,,[RFC4023] -138,manet,MANET Protocols,,[RFC5498] -139,HIP,Host Identity Protocol,Y,[RFC7401] -140,Shim6,Shim6 Protocol,Y,[RFC5533] -141,WESP,Wrapped Encapsulating Security Payload,,[RFC5840] -142,ROHC,Robust Header Compression,,[RFC5858] -143-252,,Unassigned,,[Internet_Assigned_Numbers_Authority] -253,,Use for experimentation and testing,Y,[RFC3692] -254,,Use for experimentation and testing,Y,[RFC3692] -255,Reserved,,,[Internet_Assigned_Numbers_Authority] diff --git a/routers.json.example b/routers.json.example deleted file mode 100644 index 3c4dba4..0000000 --- a/routers.json.example +++ /dev/null @@ -1,13 +0,0 @@ -{ - "router01.pop01": { - "id": "192.0.2.1", - "interfaces": { - "1": "lo", - "2": "eth0", - "3": "eth1", - "4": "eth2", - "5": "eth3" - } - - } -} diff --git a/sflow/decode.go b/sflow/decode.go new file mode 100644 index 0000000..4b1993a --- /dev/null +++ b/sflow/decode.go @@ -0,0 +1,236 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. 
+// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sflow + +import ( + "net" + "unsafe" + + "github.com/bio-routing/tflow2/convert" + "github.com/pkg/errors" + + log "github.com/sirupsen/logrus" +) + +const ( + dataFlowSample = 1 + expandedFlowSample = 3 + dataCounterSample = 2 + standardSflow = 0 + rawPacketHeader = 1 + extendedSwitchData = 1001 + extendedRouterData = 1002 +) + +// errorIncompatibleVersion prints an error message in case the detected version is not supported +func errorIncompatibleVersion(version uint32) error { + return errors.Errorf("Sflow: Incompatible protocol version v%d, only v5 is supported", version) +} + +// Decode is the main function of this package. It converts raw packet bytes to Packet struct. +func Decode(raw []byte, remote net.IP) (*Packet, error) { + data := convert.Reverse(raw) //TODO: Make it endian aware. 
This assumes a little endian machine + + pSize := len(data) + bufSize := 1500 + buffer := [1500]byte{} + + if pSize > bufSize { + panic("Buffer too small\n") + } + + // copy data into array as arrays allow us to cast the shit out of it + for i := 0; i < pSize; i++ { + buffer[bufSize-pSize+i] = data[i] + } + + bufferPtr := unsafe.Pointer(&buffer) + //bufferMinPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(pSize)) + headerPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeaderTop)) + + var p Packet + p.Buffer = buffer[:] + p.headerTop = (*headerTop)(headerPtr) + + if p.headerTop.Version != 5 { + return nil, errorIncompatibleVersion(p.Header.Version) + } + + agentAddressLen := uint64(0) + switch p.headerTop.AgentAddressType { + default: + return nil, errors.Errorf("Unknown AgentAddressType %d", p.headerTop.AgentAddressType) + case 1: + agentAddressLen = 4 + case 2: + agentAddressLen = 16 + } + + headerBottomPtr := unsafe.Pointer(uintptr(bufferPtr) + uintptr(bufSize) - uintptr(sizeOfHeaderTop) - uintptr(agentAddressLen) - uintptr(sizeOfHeaderBottom)) + p.headerBottom = (*headerBottom)(headerBottomPtr) + + h := Header{ + Version: p.headerTop.Version, + AgentAddressType: p.headerTop.AgentAddressType, + AgentAddress: getNetIP(headerPtr, agentAddressLen), + SubAgentID: p.headerBottom.SubAgentID, + SequenceNumber: p.headerBottom.SequenceNumber, + SysUpTime: p.headerBottom.SysUpTime, + NumSamples: p.headerBottom.NumSamples, + } + p.Header = &h + + flowSamples, err := decodeFlows(headerBottomPtr, h.NumSamples) + if err != nil { + return nil, errors.Wrap(err, "Unable to dissect flows") + } + p.FlowSamples = flowSamples + + return &p, nil +} + +func extractEnterpriseFormat(sfType uint32) (sfTypeEnterprise uint32, sfTypeFormat uint32) { + return sfType >> 12, sfType & 0xfff +} + +func decodeFlows(samplesPtr unsafe.Pointer, NumSamples uint32) ([]*FlowSample, error) { + flowSamples := make([]*FlowSample, 0) + for i := uint32(0); i 
< NumSamples; i++ { + sfTypeEnterprise, sfTypeFormat := extractEnterpriseFormat(*(*uint32)(unsafe.Pointer(uintptr(samplesPtr) - uintptr(4)))) + + if sfTypeEnterprise != 0 { + return nil, errors.Errorf("Unknown Enterprise: %d", sfTypeEnterprise) + } + + sampleLengthPtr := unsafe.Pointer(uintptr(samplesPtr) - uintptr(8)) + sampleLength := *(*uint32)(sampleLengthPtr) + + if sfTypeFormat == dataFlowSample { + fs, err := decodeFlowSample(samplesPtr) + if err != nil { + return nil, errors.Wrap(err, "Unable to decode flow sample") + } + flowSamples = append(flowSamples, fs) + } else if sfTypeFormat == expandedFlowSample { + fs, err := decodeExpandedFlowSample(samplesPtr) + if err != nil { + return nil, errors.Wrap(err, "Unable to decode flow sample") + } + flowSamples = append(flowSamples, fs) + } + + samplesPtr = unsafe.Pointer(uintptr(samplesPtr) - uintptr(sampleLength+8)) + } + + return flowSamples, nil +} + +func decodeFlowSample(flowSamplePtr unsafe.Pointer) (*FlowSample, error) { + flowSamplePtr = unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(sizeOfFlowSampleHeader)) + fsh := (*FlowSampleHeader)(flowSamplePtr) + + return _decodeFlowSample(flowSamplePtr, fsh) +} + +func decodeExpandedFlowSample(flowSamplePtr unsafe.Pointer) (*FlowSample, error) { + flowSamplePtr = unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(sizeOfExpandedFlowSampleHeader)) + fsh := (*ExpandedFlowSampleHeader)(flowSamplePtr).toFlowSampleHeader() + + return _decodeFlowSample(flowSamplePtr, fsh) +} + +func _decodeFlowSample(flowSamplePtr unsafe.Pointer, fsh *FlowSampleHeader) (*FlowSample, error) { + var rph *RawPacketHeader + var rphd unsafe.Pointer + var erd *ExtendedRouterData + + for i := uint32(0); i < fsh.FlowRecord; i++ { + sfTypeEnterprise, sfTypeFormat := extractEnterpriseFormat(*(*uint32)(unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(4)))) + flowDataLength := *(*uint32)(unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(8))) + + if sfTypeEnterprise == standardSflow { + var err error + 
switch sfTypeFormat { + case rawPacketHeader: + rph = decodeRawPacketHeader(flowSamplePtr) + rphd = unsafe.Pointer(uintptr(flowSamplePtr) - sizeOfRawPacketHeader) + + case extendedRouterData: + erd, err = decodeExtendRouterData(flowSamplePtr) + if err != nil { + return nil, errors.Wrap(err, "Unable to decide extended router data") + } + + case extendedSwitchData: + + default: + log.Infof("Unknown sfTypeFormat %d\n", sfTypeFormat) + } + + } + + flowSamplePtr = unsafe.Pointer(uintptr(flowSamplePtr) - uintptr(8) - uintptr(flowDataLength)) + } + + fs := &FlowSample{ + FlowSampleHeader: fsh, + RawPacketHeader: rph, + Data: rphd, + DataLen: rph.OriginalPacketLength, + ExtendedRouterData: erd, + } + + return fs, nil +} + +func decodeRawPacketHeader(rphPtr unsafe.Pointer) *RawPacketHeader { + rphPtr = unsafe.Pointer(uintptr(rphPtr) - uintptr(sizeOfRawPacketHeader)) + rph := (*RawPacketHeader)(rphPtr) + return rph +} + +func decodeExtendRouterData(erhPtr unsafe.Pointer) (*ExtendedRouterData, error) { + erhTopPtr := unsafe.Pointer(uintptr(erhPtr) - uintptr(sizeOfextendedRouterDataTop)) + erhTop := (*extendedRouterDataTop)(erhTopPtr) + + addressLen := uint64(0) + switch erhTop.AddressType { + default: + return nil, errors.Errorf("Unknown AgentAddressType %d", erhTop.AddressType) + case 1: + addressLen = 4 + case 2: + addressLen = 16 + } + + erhBottomPtr := unsafe.Pointer(uintptr(erhTopPtr) - uintptr(sizeOfextendedRouterDataBottom) - uintptr(addressLen) - uintptr(sizeOfextendedRouterDataBottom)) + erhBottom := (*extendedRouterDataBottom)(erhBottomPtr) + + return &ExtendedRouterData{ + EnterpriseType: erhTop.EnterpriseType, + FlowDataLength: erhTop.FlowDataLength, + AddressType: erhTop.AddressType, + NextHop: getNetIP(unsafe.Pointer(uintptr(erhTopPtr)), addressLen), + NextHopSourceMask: erhBottom.NextHopSourceMask, + NextHopDestinationMask: erhBottom.NextHopDestinationMask, + }, nil +} + +func getNetIP(headerPtr unsafe.Pointer, addressLen uint64) net.IP { + ptr := 
unsafe.Pointer(uintptr(headerPtr) - uintptr(1)) + addr := make([]byte, addressLen) + for i := uint64(0); i < addressLen; i++ { + addr[i] = *(*byte)(unsafe.Pointer(uintptr(ptr) - uintptr(i))) + } + + return net.IP(addr) +} diff --git a/sflow/decode_test.go b/sflow/decode_test.go new file mode 100644 index 0000000..2c5b911 --- /dev/null +++ b/sflow/decode_test.go @@ -0,0 +1,132 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. +package sflow + +import ( + "fmt" + "net" + "testing" + + "github.com/bio-routing/tflow2/convert" +) + +func TestDecode(t *testing.T) { + s := []byte{0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 22, 0, 0, 0, 32, 0, 0, 0, 62, 190, 59, 194, 
1, 0, 0, 0, 16, 0, 0, 0, 234, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 237, 199, 45, 191, 139, 110, 125, 230, 182, 29, 57, 172, 218, 131, 46, 119, 222, 169, 239, 221, 168, 115, 245, 18, 162, 61, 247, 165, 225, 137, 141, 210, 165, 115, 237, 171, 115, 10, 153, 41, 121, 49, 57, 188, 199, 201, 25, 85, 91, 144, 240, 211, 169, 192, 41, 161, 202, 222, 113, 99, 33, 78, 210, 92, 70, 28, 134, 39, 126, 255, 10, 8, 1, 1, 0, 0, 118, 202, 230, 1, 16, 128, 78, 151, 101, 60, 114, 24, 235, 218, 161, 4, 80, 0, 127, 251, 90, 95, 2, 153, 37, 185, 194, 50, 6, 63, 0, 64, 128, 86, 180, 5, 0, 69, 0, 8, 236, 43, 4, 113, 78, 32, 82, 114, 59, 217, 103, 216, 128, 0, 0, 0, 4, 0, 0, 0, 198, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 190, 2, 0, 0, 168, 2, 0, 0, 0, 0, 0, 0, 64, 127, 94, 90, 224, 3, 0, 0, 144, 2, 0, 0, 197, 164, 97, 81, 232, 0, 0, 0, 1, 0, 0, 0, 22, 0, 0, 0, + 32, 0, 0, 0, + 62, 190, 59, 194, // Next-Hop + 1, 0, 0, 0, // Address Family + 16, 0, 0, 0, // Flow Data Length + 234, 3, 0, 0, // Enterprise/Type (Extended router data) + + 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 75, 93, 7, 11, 45, 17, 165, 149, 120, 168, 247, 10, 136, 114, 169, 85, 104, 20, 124, 203, 71, 138, 96, 64, 49, 131, 198, 14, 182, 117, 228, 255, 19, 147, 111, 15, 10, 33, 225, 93, 118, 40, 164, 113, 66, 24, 150, 16, 218, 69, 118, 184, 150, 106, 186, 60, 41, 243, 231, 211, 233, 0, 131, 153, 43, 0, 3, 148, 69, 3, 10, 8, 1, 1, 0, 0, 233, 206, 130, 1, 16, 128, 172, 10, 7, 23, 40, 164, 166, 29, 62, 63, 80, 0, 43, 248, 17, 31, 4, 153, 37, 185, 46, 251, 6, 63, 0, 64, 174, 209, 180, 5, 0, 69, 0, 8, 236, 43, 4, 113, 78, 32, 82, 114, 59, 217, 103, 216, 128, 0, 0, 0, 4, 0, 0, 0, 198, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 190, 2, 0, 0, 170, 2, 0, 0, 0, 0, 0, 0, 96, 123, 94, 90, 224, 3, 0, 0, 144, 2, 0, 0, 196, 164, 97, 81, 232, 0, 0, 0, 1, 0, 0, 0, 14, 0, 0, 0, 32, 0, 0, 0, 57, 96, 89, 195, 1, 0, 0, 0, 16, 0, 0, 
0, 234, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 215, 208, 48, 29, 1, 33, 28, 71, 110, 205, 210, 148, 225, 14, 237, 179, 197, 53, 4, 58, 246, 63, 228, 230, 166, 133, 111, 70, 124, 147, 240, 222, 21, 201, 13, 213, 140, 73, 144, 70, 156, 85, 47, 29, 86, 176, 195, 134, 78, 168, 63, 135, 252, 8, 80, 190, 183, 194, 133, 210, 26, 105, 239, 144, 29, 0, 2, 76, 160, 139, 10, 8, 1, 1, 0, 0, 167, 74, 239, 0, 16, 128, 210, 21, 9, 11, 29, 195, 141, 208, 244, 155, 80, 0, 91, 117, 210, 92, 4, 153, 37, 185, 251, 64, 6, 63, 0, 64, 209, 208, 212, 5, 0, 69, 0, 8, 188, 28, 4, 113, 78, 32, 3, 248, 103, 156, 181, 132, 128, 0, 0, 0, 4, 0, 0, 0, 230, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 149, 2, 0, 0, 170, 2, 0, 0, 0, 0, 0, 0, 96, 133, 157, 123, 224, 3, 0, 0, 149, 2, 0, 0, 116, 98, 15, 54, 232, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, 32, 0, 0, 0, 33, 250, 157, 62, 1, 0, 0, 0, 16, 0, 0, 0, 234, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 193, 111, 105, 60, 190, 220, 121, 229, 158, 159, 65, 27, 79, 59, 89, 152, 153, 147, 249, 41, 34, 174, 115, 106, 7, 8, 148, 19, 165, 47, 135, 86, 42, 17, 129, 84, 254, 130, 222, 106, 42, 106, 209, 185, 205, 208, 71, 17, 126, 140, 32, 197, 254, 206, 15, 11, 174, 65, 151, 178, 9, 214, 21, 70, 123, 1, 217, 142, 46, 12, 10, 8, 1, 1, 0, 0, 80, 121, 23, 4, 16, 128, 116, 173, 164, 116, 56, 194, 157, 44, 176, 189, 80, 0, 246, 113, 186, 87, 3, 153, 37, 185, 75, 197, 6, 63, 0, 64, 255, 84, 212, 5, 0, 69, 0, 8, 185, 28, 4, 113, 78, 32, 148, 2, 127, 31, 113, 128, 128, 0, 0, 0, 4, 0, 0, 0, 230, 5, 0, 0, 1, 0, 0, 0, 144, 0, 0, 0, 1, 0, 0, 0, 3, 0, 0, 0, 146, 2, 0, 0, 171, 2, 0, 0, 0, 0, 0, 0, 128, 85, 79, 192, 224, 3, 0, 0, 146, 2, 0, 0, 211, 127, 173, 95, 232, 0, 0, 0, 1, 0, 0, 0, 10, 0, 0, 0, + 32, 0, 0, 0, + 33, 250, 157, 62, // Next-Hop + 1, 0, 0, 0, // Address Family + 16, 0, 0, 0, // Flow Data Length + 234, 3, 0, 0, // Enterprise/Type (Extended router data) + + 0, 0, 
0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 210, 0, 0, 0, 16, 0, 0, 0, 233, 3, 0, 0, 209, 50, 196, 16, 191, 134, 236, 166, 206, 27, 249, 140, 64, 231, 148, 246, 19, 88, 36, 9, 167, 240, 97, 133, 46, 175, 100, 47, 143, 160, 84, 35, 234, 71, 176, 116, 103, 119, 151, 133, 184, 52, 169, 202, 53, 231, 149, 40, 16, 81, 31, 242, 100, 122, 152, 78, 32, 133, 116, 22, 89, 122, 149, 27, 64, 0, 173, 248, 203, 199, 10, 8, 1, 1, 0, 0, 199, 212, 235, 0, 16, + 128, // Header Length + 92, 180, 133, 203, // ACK Number + 31, 4, 191, 24, // Sequence Number + 222, 148, // DST port + 80, 0, // SRC port + + 19, 131, 191, 87, // DST IP + 238, 153, 37, 185, // SRC IP + 186, 25, // Header Checksum + 6, // Protocol + 62, // TTL + 0, 64, // Flags + Fragment offset + 131, 239, // Identifier + 212, 5, // Total Length + 0, // TOS + 69, // Version + Length + + 0, 8, // EtherType + 185, 28, 4, 113, 78, 32, // Source MAC + 148, 2, 127, 31, 113, 128, // Destination MAC + + 128, 0, 0, 0, // Original Packet length + 4, 0, 0, 0, // Payload removed + 230, 5, 0, 0, // Frame length + 1, 0, 0, 0, // Header Protocol + 144, 0, 0, 0, // Flow Data Length + 1, 0, 0, 0, // Enterprise/Type + + 3, 0, 0, 0, // Flow Record count + 146, 2, 0, 0, // Output interface + 7, 2, 0, 0, // Input interface + 0, 0, 0, 0, // Dropped Packets + 160, 81, 79, 192, // Sampling Pool + 224, 3, 0, 0, // Sampling Rate + 146, 2, 0, 0, // Source ID + Index + 210, 127, 173, 95, // Sequence Number + 232, 0, 0, 0, // sample length + 1, 0, 0, 0, // Enterprise/Type + + 5, 0, 0, 0, // NumSamples + 111, 0, 0, 0, // SysUpTime + 222, 0, 0, 0, // Sequence Number + 0, 0, 0, 0, // Sub-AgentID + 14, 19, 205, 10, // Agent Address + 1, 0, 0, 0, // Agent Address Type + 5, 0, 0, 0, // Version + } + s = convert.Reverse(s) + + packet, err := Decode(s, net.IP([]byte{1, 1, 1, 1})) + if err != nil { + t.Errorf("Decoding packet failed: %v\n", err) + } + + if packet.Header.AgentAddress.String() != "10.205.19.14" { + t.Errorf("Incorrect AgentAddress: Exptected 10.205.19.14 got 
%s", packet.Header.AgentAddress.String()) + } +} + +func dump(packet *Packet) { + fmt.Printf("PACKET DUMP:\n") + for _, fs := range packet.FlowSamples { + if fs.ExtendedRouterData != nil { + fmt.Printf("Extended router data:\n") + fmt.Printf("Next-Hop: %s\n", fs.ExtendedRouterData.NextHop.String()) + } + if fs.RawPacketHeader != nil { + fmt.Printf("Raw packet header:\n") + fmt.Printf("OriginalPacketLength: %d\n", fs.RawPacketHeader.OriginalPacketLength) + fmt.Printf("Original Packet:\n") + } + } +} + +func testEq(a, b []byte) bool { + + if a == nil && b == nil { + return true + } + + if a == nil || b == nil { + return false + } + + if len(a) != len(b) { + return false + } + + for i := range a { + if a[i] != b[i] { + return false + } + } + + return true +} diff --git a/sflow/packet.go b/sflow/packet.go new file mode 100644 index 0000000..7fc60db --- /dev/null +++ b/sflow/packet.go @@ -0,0 +1,153 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. + +package sflow + +import ( + "net" + "unsafe" +) + +// Packet is a decoded representation of a single sflow UDP packet. +type Packet struct { + // A pointer to the packets headers + Header *Header + headerTop *headerTop + headerBottom *headerBottom + + // A slice of pointers to FlowSet. Each element is instance of (Data)FlowSet + FlowSamples []*FlowSample + + // Buffer is a slice pointing to the original byte array that this packet was decoded from. 
+ // This field is only populated if debug level is at least 2 + Buffer []byte +} + +var ( + sizeOfHeaderTop = unsafe.Sizeof(headerTop{}) + sizeOfHeaderBottom = unsafe.Sizeof(headerBottom{}) + sizeOfFlowSampleHeader = unsafe.Sizeof(FlowSampleHeader{}) + sizeOfExpandedFlowSampleHeader = unsafe.Sizeof(ExpandedFlowSampleHeader{}) + sizeOfRawPacketHeader = unsafe.Sizeof(RawPacketHeader{}) + sizeofExtendedRouterData = unsafe.Sizeof(ExtendedRouterData{}) + sizeOfextendedRouterDataTop = unsafe.Sizeof(extendedRouterDataTop{}) + sizeOfextendedRouterDataBottom = unsafe.Sizeof(extendedRouterDataBottom{}) +) + +// Header is an sflow version 5 header +type Header struct { + Version uint32 + AgentAddressType uint32 + AgentAddress net.IP + SubAgentID uint32 + SequenceNumber uint32 + SysUpTime uint32 + NumSamples uint32 +} + +type headerTop struct { + AgentAddressType uint32 + Version uint32 +} + +type headerBottom struct { + NumSamples uint32 + SysUpTime uint32 + SequenceNumber uint32 + SubAgentID uint32 +} + +// FlowSample is an sflow version 5 flow sample +type FlowSample struct { + FlowSampleHeader *FlowSampleHeader + ExpandedFlowSampleHeader *ExpandedFlowSampleHeader + RawPacketHeader *RawPacketHeader + Data unsafe.Pointer + DataLen uint32 + ExtendedRouterData *ExtendedRouterData +} + +// FlowSampleHeader is an sflow version 5 flow sample header +type FlowSampleHeader struct { + FlowRecord uint32 + OutputIf uint32 + InputIf uint32 + DroppedPackets uint32 + SamplePool uint32 + SamplingRate uint32 + SourceIDClassIndex uint32 + SequenceNumber uint32 + SampleLength uint32 + EnterpriseType uint32 +} + +// ExpandedFlowSampleHeader is an sflow version 5 flow expanded sample header +type ExpandedFlowSampleHeader struct { + FlowRecord uint32 + OutputIf uint32 + _ uint32 + InputIf uint32 + _ uint32 + DroppedPackets uint32 + SamplePool uint32 + SamplingRate uint32 + SourceIDClassIndex uint32 + _ uint32 + SequenceNumber uint32 + SampleLength uint32 + EnterpriseType uint32 +} + +func (e 
*ExpandedFlowSampleHeader) toFlowSampleHeader() *FlowSampleHeader { + return &FlowSampleHeader{ + FlowRecord: e.FlowRecord, + OutputIf: e.OutputIf, + InputIf: e.InputIf, + DroppedPackets: e.DroppedPackets, + SamplePool: e.SamplePool, + SamplingRate: e.SamplingRate, + SourceIDClassIndex: e.SourceIDClassIndex, + SequenceNumber: e.SequenceNumber, + SampleLength: e.SampleLength, + EnterpriseType: e.EnterpriseType, + } +} + +// RawPacketHeader is a raw packet header +type RawPacketHeader struct { + OriginalPacketLength uint32 + PayloadRemoved uint32 + FrameLength uint32 + HeaderProtocol uint32 + FlowDataLength uint32 + EnterpriseType uint32 +} + +type extendedRouterDataTop struct { + AddressType uint32 + FlowDataLength uint32 + EnterpriseType uint32 +} + +type extendedRouterDataBottom struct { + NextHopDestinationMask uint32 + NextHopSourceMask uint32 +} + +// ExtendedRouterData represents sflow version 5 extended router data +type ExtendedRouterData struct { + NextHopDestinationMask uint32 + NextHopSourceMask uint32 + NextHop net.IP + AddressType uint32 + FlowDataLength uint32 + EnterpriseType uint32 +} diff --git a/sfserver/sfserver.go b/sfserver/sfserver.go new file mode 100644 index 0000000..585f1d7 --- /dev/null +++ b/sfserver/sfserver.go @@ -0,0 +1,297 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+ +// Package sfserver provides sflow collection services via UDP and passes flows into annotator layer +package sfserver + +import ( + "fmt" + "io" + "net" + "sync" + "sync/atomic" + "time" + "unsafe" + + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/packet" + "github.com/bio-routing/tflow2/sflow" + "github.com/bio-routing/tflow2/srcache" + "github.com/bio-routing/tflow2/stats" + "github.com/pkg/errors" + + log "github.com/sirupsen/logrus" +) + +// SflowServer represents a sflow Collector instance +type SflowServer struct { + // Output is the channel used to send flows to the annotator layer + Output chan *netflow.Flow + + // debug defines the debug level + debug int + + // bgpAugment is used to decide if ASN information from netflow packets should be used + bgpAugment bool + + // con is the UDP socket + conn *net.UDPConn + + wg sync.WaitGroup + + config *config.Config + + sampleRateCache *srcache.SamplerateCache +} + +// New creates and starts a new `SflowServer` instance +func New(numReaders int, config *config.Config, sampleRateCache *srcache.SamplerateCache) *SflowServer { + sfs := &SflowServer{ + Output: make(chan *netflow.Flow), + config: config, + sampleRateCache: sampleRateCache, + } + + addr, err := net.ResolveUDPAddr("udp", sfs.config.Sflow.Listen) + if err != nil { + panic(fmt.Sprintf("ResolveUDPAddr: %v", err)) + } + + con, err := net.ListenUDP("udp", addr) + if err != nil { + panic(fmt.Sprintf("Listen: %v", err)) + } + + // Create goroutines that read netflow packet and process it + for i := 0; i < numReaders; i++ { + sfs.wg.Add(numReaders) + go func(num int) { + sfs.packetWorker(num, con) + }(i) + } + + return sfs +} + +// Close closes the socket and stops the workers +func (sfs *SflowServer) Close() { + sfs.conn.Close() + sfs.wg.Wait() +} + +// packetWorker reads netflow packet from socket and handsoff processing to processFlowSets() +func 
(sfs *SflowServer) packetWorker(identity int, conn *net.UDPConn) { + buffer := make([]byte, 8960) + for { + length, remote, err := conn.ReadFromUDP(buffer) + if err == io.EOF { + break + } + if err != nil { + log.Errorf("Error reading from socket: %v", err) + continue + } + atomic.AddUint64(&stats.GlobalStats.SflowPackets, 1) + atomic.AddUint64(&stats.GlobalStats.SflowBytes, uint64(length)) + + remote.IP = remote.IP.To4() + if remote.IP == nil { + log.Errorf("Received IPv6 packet. Dropped.") + continue + } + + sfs.processPacket(remote.IP, buffer[:length]) + } + sfs.wg.Done() +} + +// processPacket takes a raw sflow packet, send it to the decoder and passes the decoded packet +func (sfs *SflowServer) processPacket(agent net.IP, buffer []byte) { + length := len(buffer) + p, err := sflow.Decode(buffer[:length], agent) + if err != nil { + log.Errorf("sflow.Decode: %v", err) + return + } + + for _, fs := range p.FlowSamples { + if fs.RawPacketHeader == nil { + log.Infof("Received sflow packet without raw packet header. Skipped.") + continue + } + + if fs.Data == nil { + log.Infof("Received sflow packet without raw packet header. 
Skipped.") + continue + } + + if fs.RawPacketHeader.HeaderProtocol != 1 { + log.Infof("Unknown header protocol: %d", fs.RawPacketHeader.HeaderProtocol) + continue + } + + ether, err := packet.DecodeEthernet(fs.Data, fs.RawPacketHeader.OriginalPacketLength) + if err != nil { + log.Infof("Unable to decode ether packet: %v", err) + continue + } + fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfEthernetII) + fs.DataLen -= uint32(packet.SizeOfEthernetII) + + fl := &netflow.Flow{ + Router: agent, + IntIn: fs.FlowSampleHeader.InputIf, + IntOut: fs.FlowSampleHeader.OutputIf, + Size: uint64(fs.RawPacketHeader.FrameLength), + Packets: uint32(1), + Timestamp: time.Now().Unix(), + Samplerate: uint64(fs.FlowSampleHeader.SamplingRate), + } + + // We're updating the sampleCache to allow the forntend to show current sampling rates + sfs.sampleRateCache.Set(agent, uint64(fs.FlowSampleHeader.SamplingRate)) + + if fs.ExtendedRouterData != nil { + fl.NextHop = fs.ExtendedRouterData.NextHop + } + + sfs.processEthernet(ether.EtherType, fs, fl) + + if fl.Family >= 0 { + if fl.Family == 4 { + atomic.AddUint64(&stats.GlobalStats.Flows4, 1) + } else if fl.Family == 6 { + atomic.AddUint64(&stats.GlobalStats.Flows6, 1) + } else { + log.Warning("Unknown address family") + continue + } + } + + sfs.Output <- fl + } +} + +func (sfs *SflowServer) processEthernet(ethType uint16, fs *sflow.FlowSample, fl *netflow.Flow) { + if ethType == packet.EtherTypeIPv4 { + sfs.processIPv4Packet(fs, fl) + } else if ethType == packet.EtherTypeIPv6 { + sfs.processIPv6Packet(fs, fl) + } else if ethType == packet.EtherTypeARP || ethType == packet.EtherTypeLACP { + return + } else if ethType == packet.EtherTypeIEEE8021Q { + sfs.processDot1QPacket(fs, fl) + } else { + log.Errorf("Unknown EtherType: 0x%x", ethType) + } +} + +func (sfs *SflowServer) processDot1QPacket(fs *sflow.FlowSample, fl *netflow.Flow) { + dot1q, err := packet.DecodeDot1Q(fs.Data, fs.DataLen) + if err != nil { + log.Errorf("Unable to 
decode dot1q header: %v", err) + } + fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfDot1Q) + fs.DataLen -= uint32(packet.SizeOfDot1Q) + + sfs.processEthernet(dot1q.EtherType, fs, fl) +} + +func (sfs *SflowServer) processIPv4Packet(fs *sflow.FlowSample, fl *netflow.Flow) { + fl.Family = 4 + ipv4, err := packet.DecodeIPv4(fs.Data, fs.DataLen) + if err != nil { + log.Errorf("Unable to decode IPv4 packet: %v", err) + } + fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfIPv4Header) + fs.DataLen -= uint32(packet.SizeOfIPv4Header) + + fl.SrcAddr = convert.Reverse(ipv4.SrcAddr[:]) + fl.DstAddr = convert.Reverse(ipv4.DstAddr[:]) + fl.Protocol = uint32(ipv4.Protocol) + switch ipv4.Protocol { + case packet.TCP: + if err := getTCP(fs.Data, fs.DataLen, fl); err != nil { + log.Errorf("%v", err) + } + case packet.UDP: + if err := getUDP(fs.Data, fs.DataLen, fl); err != nil { + log.Errorf("%v", err) + } + } +} + +func (sfs *SflowServer) processIPv6Packet(fs *sflow.FlowSample, fl *netflow.Flow) { + fl.Family = 6 + ipv6, err := packet.DecodeIPv6(fs.Data, fs.DataLen) + if err != nil { + log.Errorf("Unable to decode IPv6 packet: %v", err) + } + fs.Data = unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfIPv6Header) + fs.DataLen -= uint32(packet.SizeOfIPv6Header) + + fl.SrcAddr = convert.Reverse(ipv6.SrcAddr[:]) + fl.DstAddr = convert.Reverse(ipv6.DstAddr[:]) + fl.Protocol = uint32(ipv6.NextHeader) + switch ipv6.NextHeader { + case packet.TCP: + if err := getTCP(fs.Data, fs.DataLen, fl); err != nil { + log.Errorf("%v", err) + } + case packet.UDP: + if err := getUDP(fs.Data, fs.DataLen, fl); err != nil { + log.Errorf("%v", err) + } + } +} + +func getUDP(udpPtr unsafe.Pointer, length uint32, fl *netflow.Flow) error { + udp, err := packet.DecodeUDP(udpPtr, length) + if err != nil { + return errors.Wrap(err, "Unable to decode UDP datagram") + } + + fl.SrcPort = uint32(udp.SrcPort) + fl.DstPort = uint32(udp.DstPort) + + return nil +} + +func getTCP(tcpPtr unsafe.Pointer, 
length uint32, fl *netflow.Flow) error { + tcp, err := packet.DecodeTCP(tcpPtr, length) + if err != nil { + return errors.Wrap(err, "Unable to decode TCP segment") + } + + fl.SrcPort = uint32(tcp.SrcPort) + fl.DstPort = uint32(tcp.DstPort) + + return nil +} + +// Dump dumps a flow on the screen +func Dump(fl *netflow.Flow) { + fmt.Printf("--------------------------------\n") + fmt.Printf("Flow dump:\n") + fmt.Printf("Router: %d\n", fl.Router) + fmt.Printf("Family: %d\n", fl.Family) + fmt.Printf("SrcAddr: %s\n", net.IP(fl.SrcAddr).String()) + fmt.Printf("DstAddr: %s\n", net.IP(fl.DstAddr).String()) + fmt.Printf("Protocol: %d\n", fl.Protocol) + fmt.Printf("NextHop: %s\n", net.IP(fl.NextHop).String()) + fmt.Printf("IntIn: %d\n", fl.IntIn) + fmt.Printf("IntOut: %d\n", fl.IntOut) + fmt.Printf("Packets: %d\n", fl.Packets) + fmt.Printf("Bytes: %d\n", fl.Size) + fmt.Printf("--------------------------------\n") +} diff --git a/sfserver/sfserver_test.go b/sfserver/sfserver_test.go new file mode 100644 index 0000000..cc7ea01 --- /dev/null +++ b/sfserver/sfserver_test.go @@ -0,0 +1,148 @@ +// Copyright 2017 EXARING AG. All Rights Reserved. +// Licensed under the Apache License, Version 2.0 (the "License"); +// you may not use this file except in compliance with the License. +// You may obtain a copy of the License at +// http://www.apache.org/licenses/LICENSE-2.0 +// Unless required by applicable law or agreed to in writing, software +// distributed under the License is distributed on an "AS IS" BASIS, +// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +// See the License for the specific language governing permissions and +// limitations under the License. 
+package sfserver + +import ( + "fmt" + "net" + "testing" + "unsafe" + + "github.com/bio-routing/tflow2/convert" + "github.com/bio-routing/tflow2/packet" + "github.com/bio-routing/tflow2/sflow" + + log "github.com/sirupsen/logrus" +) + +func TestIntegration(t *testing.T) { + s := []byte{ + 10, 0, 0, 0, // Destination Mask + 32, 0, 0, 0, // Source Mask + 33, 250, 157, 62, // Next-Hop + 1, 0, 0, 0, // Address Family + 16, 0, 0, 0, // Flow Data Length + 234, 3, 0, 0, // Enterprise/Type (Extended router data) + + 0, 0, 0, 0, // Priority OUT + 0, 0, 0, 0, // VLAN OUT + 0, 0, 0, 0, // Priority IN + 210, 0, 0, 0, // VLAN IN + 16, 0, 0, 0, // Flow Data Length + 233, 3, 0, 0, // Enterprise/Type (Extended switch data) + + 209, 50, 196, 16, 191, 134, 236, 166, 206, 27, 249, 140, 64, 231, 148, 246, 19, 88, 36, 9, 167, 240, 97, 133, 46, 175, 100, 47, 143, 160, 84, 35, 234, 71, 176, 116, 103, 119, 151, 133, 184, 52, 169, 202, 53, 231, 149, 40, 16, 81, 31, 242, 100, 122, 152, 78, 32, 133, 116, 22, 89, 122, 149, 27, 64, 0, 173, 248, 203, 199, 10, 8, 1, 1, 0, 0, 199, + 212, 235, 0, 16, + 128, // Header Length + 92, 180, 133, 203, // ACK Number + 31, 4, 191, 24, // Sequence Number + 222, 148, // DST port + 80, 0, // SRC port + + 19, 131, 191, 87, // DST IP + 238, 153, 37, 185, // SRC IP + 186, 25, // Header Checksum + 6, // Protocol + 62, // TTL + 0, 64, // Flags + Fragment offset + 131, 239, // Identifier + 212, 5, // Total Length + 0, // TOS + 69, // Version + Length + + 0, 8, // EtherType + 0xb9, 0x1c, 0x04, 0x71, 0x4e, 0x20, // Source MAC + 0x94, 0x02, 0x7f, 0x1f, 0x71, 0x80, // Destination MAC + + 128, 0, 0, 0, // Original Packet length (92 Bytes until here, incl.) 
+ 4, 0, 0, 0, // Payload removed + 230, 5, 0, 0, // Frame length + 1, 0, 0, 0, // Header Protocol + 144, 0, 0, 0, // Flow Data Length + 1, 0, 0, 0, // Enterprise/Type (Raw packet header) + + 3, 0, 0, 0, // Flow Record count + 146, 2, 0, 0, // Output interface + 7, 2, 0, 0, // Input interface + 0, 0, 0, 0, // Dropped Packets + 160, 81, 79, 192, // Sampling Pool + 224, 3, 0, 0, // Sampling Rate + 146, 2, 0, 0, // Source ID + Index + 210, 127, 173, 95, // Sequence Number + 232, 0, 0, 0, // sample length + 1, 0, 0, 0, // Enterprise/Type + + 1, 0, 0, 0, // NumSamples + 111, 0, 0, 0, // SysUpTime + 222, 0, 0, 0, // Sequence Number + 0, 0, 0, 0, // Sub-AgentID + 14, 19, 205, 10, // Agent Address + 1, 0, 0, 0, // Agent Address Type + 5, 0, 0, 0, // Version + } + s = convert.Reverse(s) + + p, err := sflow.Decode(s, net.IP([]byte{1, 1, 1, 1})) + if err != nil { + t.Errorf("Decoding packet failed: %v\n", err) + } + + for _, fs := range p.FlowSamples { + if fs.RawPacketHeader == nil { + log.Infof("Received sflow packet without raw packet header. Skipped.") + continue + } + + ether, err := packet.DecodeEthernet(fs.Data, fs.RawPacketHeader.OriginalPacketLength) + if err != nil { + log.Infof("Unable to decode ether packet: %v", err) + continue + } + + if ether.DstMAC.String() != "80:71:1f:7f:02:94" { + t.Errorf("Unexpected DST MAC address. Expected %s. Got %s", "80:71:1f:7f:02:94", ether.DstMAC.String()) + } + + if ether.SrcMAC.String() != "20:4e:71:04:1c:b9" { + t.Errorf("Unexpected SRC MAC address. Expected %s. 
Got %s", "20:4e:71:04:1c:b9", ether.SrcMAC.String()) + } + + if fs.RawPacketHeader.HeaderProtocol == 1 { + ipv4Ptr := unsafe.Pointer(uintptr(fs.Data) - packet.SizeOfEthernetII) + ipv4, err := packet.DecodeIPv4(ipv4Ptr, fs.RawPacketHeader.OriginalPacketLength-uint32(packet.SizeOfEthernetII)) + if err != nil { + t.Errorf("Unable to decode IPv4 packet: %v", err) + } + + convert.Reverse(ipv4.SrcAddr[:]) + if net.IP(ipv4.SrcAddr[:]).String() != "185.37.153.238" { + t.Errorf("Wrong IPv4 src address: Got %v. Expected %v", net.IP(convert.Reverse(ipv4.SrcAddr[:])).String(), "185.37.153.238") + } + + fmt.Printf("IPv4 SRC: %s\n", net.IP(ipv4.SrcAddr[:]).String()) + + if ipv4.Protocol == 6 { + tcpPtr := unsafe.Pointer(uintptr(ipv4Ptr) - packet.SizeOfIPv4Header) + tcp, err := packet.DecodeTCP(tcpPtr, fs.RawPacketHeader.OriginalPacketLength-uint32(packet.SizeOfEthernetII)-uint32(packet.SizeOfIPv4Header)) + if err != nil { + t.Errorf("Unable to decode TCP segment: %v", err) + } + fmt.Printf("SRC PORT: %d\n", tcp.SrcPort) + fmt.Printf("DST PORT: %d\n", tcp.DstPort) + } else { + t.Errorf("Unknown IP protocol: %d\n", ipv4.Protocol) + } + + } else { + t.Errorf("Unknown HeaderProtocol: %d", fs.RawPacketHeader.HeaderProtocol) + } + } +} diff --git a/srcache/srcache.go b/srcache/srcache.go new file mode 100644 index 0000000..3bca44d --- /dev/null +++ b/srcache/srcache.go @@ -0,0 +1,48 @@ +package srcache + +import ( + "net" + "sync" + + "github.com/bio-routing/tflow2/config" +) + +// SamplerateCache caches information about samplerates +type SamplerateCache struct { + cache map[string]uint64 + mu sync.RWMutex +} + +// New creates a new SamplerateCache and initializes it with values from the config +func New(agents []config.Agent) *SamplerateCache { + c := &SamplerateCache{ + cache: make(map[string]uint64), + } + + // Initialize cache with configured samplerates + for _, a := range agents { + c.Set(net.ParseIP(a.IPAddress), a.SampleRate) + } + + return c +} + +// Set updates a cache 
entry +func (s *SamplerateCache) Set(rtr net.IP, rate uint64) { + s.mu.Lock() + defer s.mu.Unlock() + + s.cache[string(rtr)] = rate +} + +// Get gets a cache entry +func (s *SamplerateCache) Get(rtr net.IP) uint64 { + s.mu.RLock() + defer s.mu.RUnlock() + + if _, ok := s.cache[string(rtr)]; !ok { + return 1 + } + + return s.cache[string(rtr)] +} diff --git a/stats/stats.go b/stats/stats.go index f31b52a..54afbab 100644 --- a/stats/stats.go +++ b/stats/stats.go @@ -33,6 +33,8 @@ type Stats struct { Netflow9bytes uint64 IPFIXpackets uint64 IPFIXbytes uint64 + SflowPackets uint64 + SflowBytes uint64 } // GlobalStats is instance of `Stats` to keep stats of this program @@ -43,8 +45,8 @@ func Init() { GlobalStats.StartTime = time.Now().Unix() } -// Varz is used to serve HTTP requests /varz and send the statistics to a client in borgmon/prometheus compatible format -func Varz(w http.ResponseWriter) { +// Metrics is used to serve HTTP requests /metrics and send the statistics to a client in borgmon/prometheus compatible format +func Metrics(w http.ResponseWriter) { now := time.Now().Unix() fmt.Fprintf(w, "netflow_collector_uptime %d\n", now-GlobalStats.StartTime) fmt.Fprintf(w, "netflow_collector_flows4 %d\n", atomic.LoadUint64(&GlobalStats.Flows4)) @@ -58,4 +60,11 @@ func Varz(w http.ResponseWriter) { fmt.Fprintf(w, "netflow_collector_netflow9_bytes %d\n", atomic.LoadUint64(&GlobalStats.Netflow9bytes)) fmt.Fprintf(w, "netflow_collector_ipfix_packets %d\n", atomic.LoadUint64(&GlobalStats.IPFIXpackets)) fmt.Fprintf(w, "netflow_collector_ipfix_bytes %d\n", atomic.LoadUint64(&GlobalStats.IPFIXbytes)) + fmt.Fprintf(w, "netflow_collector_sflow_packets %d\n", atomic.LoadUint64(&GlobalStats.SflowPackets)) + fmt.Fprintf(w, "netflow_collector_sflow_bytes %d\n", atomic.LoadUint64(&GlobalStats.SflowBytes)) + routerStats(w) +} + +func routerStats(w http.ResponseWriter) { + } diff --git a/tflow2.css b/tflow2.css index 072eeba..78779d4 100644 --- a/tflow2.css +++ b/tflow2.css @@ -26,13 
+26,6 @@ header { border-bottom-style: solid; border-bottom-color: rgb(255, 203, 5); } -form { - padding: 0; - margin: 0; - border: 0; - width: 0; - height: 0; -} h1 { color: #dc0067; padding: 5px; @@ -71,7 +64,7 @@ h1 { fieldset { margin-bottom: 15px; } -#fs_container { +form { background-color: #cecece; padding-bottom: 5px; } diff --git a/tflow2.go b/tflow2.go index b105cd0..164b406 100644 --- a/tflow2.go +++ b/tflow2.go @@ -1,4 +1,4 @@ -// Copyright 2017 Google Inc. All Rights Reserved. +// Copyright 2017 Google Inc, EXARING AG. All Rights Reserved. // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at @@ -14,57 +14,113 @@ package main import ( "flag" + "os" "runtime" "sync" + "time" - "github.com/google/tflow2/annotator" - "github.com/google/tflow2/database" - "github.com/google/tflow2/frontend" - "github.com/google/tflow2/ifserver" - "github.com/google/tflow2/netflow" - "github.com/google/tflow2/nfserver" - "github.com/google/tflow2/stats" + "github.com/bio-routing/tflow2/annotation" + "github.com/bio-routing/tflow2/config" + "github.com/bio-routing/tflow2/database" + "github.com/bio-routing/tflow2/frontend" + "github.com/bio-routing/tflow2/iana" + "github.com/bio-routing/tflow2/ifserver" + "github.com/bio-routing/tflow2/intfmapper" + "github.com/bio-routing/tflow2/netflow" + "github.com/bio-routing/tflow2/nfserver" + "github.com/bio-routing/tflow2/sfserver" + "github.com/bio-routing/tflow2/srcache" + "github.com/bio-routing/tflow2/stats" + + log "github.com/sirupsen/logrus" ) var ( - nfAddr = flag.String("netflow", ":2055", "Address to use to receive netflow packets") - ipfixAddr = flag.String("ipfix", ":4739", "Address to use to receive ipfix packets") - aggregation = flag.Int64("aggregation", 60, "Time to groups flows together into one data point") - maxAge = flag.Int64("maxage", 1800, "Maximum age of saved flows") - web = 
flag.String("web", ":4444", "Address to use for web service") - birdSock = flag.String("birdsock", "/var/run/bird/bird.ctl", "Unix domain socket to communicate with BIRD") - birdSock6 = flag.String("birdsock6", "/var/run/bird/bird6.ctl", "Unix domain socket to communicate with BIRD6") - bgpAugment = flag.Bool("bgp", true, "Use BIRD to augment BGP flow information") protoNums = flag.String("protonums", "protocol_numbers.csv", "CSV file to read protocol definitions from") sockReaders = flag.Int("sockreaders", 24, "Num of go routines reading and parsing netflow packets") channelBuffer = flag.Int("channelbuffer", 1024, "Size of buffer for channels") dbAddWorkers = flag.Int("dbaddworkers", 24, "Number of workers adding flows into database") nAggr = flag.Int("numaggr", 12, "Number of flow aggregator workers") - samplerate = flag.Int("samplerate", 1, "Samplerate of routers") - debugLevel = flag.Int("debug", 0, "Debug level, 0: none, 1: +shows if we are receiving flows we are lacking templates for, 2: -, 3: +dump all packets on screen") - compLevel = flag.Int("comp", 6, "gzip compression level for data storage on disk") - dataDir = flag.String("data", "./data", "Path to store long term flow logs") - anonymize = flag.Bool("anonymize", false, "Replace IP addresses with NULL before dumping flows to disk") + + configFile = flag.String("config", "config.yml", "tflow2 configuration file") ) func main() { - flag.Parse() runtime.GOMAXPROCS(runtime.NumCPU()) - stats.Init() + flag.Parse() + + cfg, err := config.New(*configFile) + if err != nil { + log.Errorf("Unable to get configuration: %v", err) + os.Exit(1) + } - nfs := nfserver.New(*nfAddr, *sockReaders, *bgpAugment, *debugLevel) + // Initialize statistics module + stats.Init() - ifs := ifserver.New(*ipfixAddr, *sockReaders, *bgpAugment, *debugLevel) + inftMapper, err := intfmapper.New(cfg.Agents, cfg.AggregationPeriod, time.Duration(cfg.InterfaceMapperRefreshPeriod)*time.Second) + if err != nil { + log.Errorf("Unable to 
initialize interface mappper: %v", err) + os.Exit(1) + } chans := make([]chan *netflow.Flow, 0) - chans = append(chans, nfs.Output) - chans = append(chans, ifs.Output) - flowDB := database.New(*aggregation, *maxAge, *dbAddWorkers, *samplerate, *debugLevel, *compLevel, *dataDir, *anonymize) + // Sample Rate Cache + srcache := srcache.New(cfg.Agents) + + // Netflow v9 Server + if *cfg.NetflowV9.Enabled { + nfs := nfserver.New(*sockReaders, cfg, srcache) + chans = append(chans, nfs.Output) + } + + // IPFIX Server + if *cfg.IPFIX.Enabled { + ifs := ifserver.New(*sockReaders, cfg, srcache) + chans = append(chans, ifs.Output) + } + + // sFlow Server + if *cfg.Sflow.Enabled { + sfs := sfserver.New(*sockReaders, cfg, srcache) + chans = append(chans, sfs.Output) + } + + // Get IANA instance + iana := iana.New() + + // Start the database layer + flowDB := database.New( + cfg.AggregationPeriod, + *cfg.CacheTime, + *dbAddWorkers, + cfg.Debug, + *cfg.CompressionLevel, + cfg.DataDir, + cfg.Anonymize, + inftMapper, + cfg.AgentsNameByIP, + iana, + ) - annotator.New(chans, flowDB.Input, *nAggr, *aggregation, *bgpAugment, *birdSock, *birdSock6, *debugLevel) + // Start the annotation layer + annotation.New( + chans, + flowDB.Input, + *nAggr, + cfg, + ) - frontend.New(*web, *protoNums, flowDB) + // Frontend + if *cfg.Frontend.Enabled { + frontend.New( + flowDB, + inftMapper, + iana, + cfg, + ) + } var wg sync.WaitGroup wg.Add(1) diff --git a/tflow2.html b/tflow2.html index f952f8f..09aae9e 100644 --- a/tflow2.html +++ b/tflow2.html @@ -15,7 +15,8 @@ tflow Netflow Analyzer - + + @@ -24,145 +25,138 @@

tflow Netflow Analyzer

-
- - -
-
+
Netflow Query
Filter
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
Breakdown
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
- - + +
-
+
\ No newline at end of file diff --git a/tflow2.js b/tflow2.js index 6869312..39aaba5 100644 --- a/tflow2.js +++ b/tflow2.js @@ -13,159 +13,136 @@ var query; var protocols; var availableProtocols = []; var rtrs; -var routers = []; +var agents = []; var interfaces = []; -const OpEqual = 0; -const OpUnequal = 1; -const OpSmaller = 2; -const OpGreater = 3; -const FieldTimestamp = 0; -const FieldRouter = 1; -const FieldSrcAddr = 2; -const FieldDstAddr = 3; -const FieldProtocol = 4; -const FieldIntIn = 5; -const FieldIntOut = 6; -const FieldNextHop = 7; -const FieldSrcAs = 8; -const FieldDstAs = 9; -const FieldNextHopAs = 10; -const FieldSrcPfx = 11; -const FieldDstPfx = 12; -const FieldSrcPort = 13; -const FieldDstPort = 14; -const fields = { - "Router": 1, - "SrcAddr": 2, - "DstAddr": 3, - "Protocol": 4, - "IntIn": 5, - "IntOut": 6, - "NextHop": 7, - "SrcAsn": 8, - "DstAsn": 9, - "NextHopAsn": 10, - "SrcPfx": 11, - "DstPfx": 12, - "SrcPort": 13, - "DstPort": 14, -}; -const fieldById = { - "1": "Router", - "2": "SrcAddr", - "3": "DstAddr", - "4": "Protocol", - "5": "IntIn", - "6": "IntOut", - "7": "NextHop", - "8": "SrcAsn", - "9": "DstAsn", - "10": "NextHopAsn", - "11": "SrcPfx", - "12": "DstPfx", - "13": "SrcPort", - "14": "DstPort" -}; - -var bdfields = [ - "SrcAddr", "DstAddr", "Protocol", "IntIn", "IntOut", "NextHop", "SrcAsn", "DstAsn", - "NextHopAsn", "SrcPfx", "DstPfx", "SrcPort", "DstPort" ]; function drawChart() { - var query = $("#query").val(); - if (query == "" || query == "{}") { + var query = location.href.split("#")[1] + if (!query) { return; } - var url = "/query?q=" + encodeURI(query) - console.log(url); - $.get(url, function(rdata) { - console.log(rdata); - d = []; - d = JSON.parse(rdata); - data = google.visualization.arrayToDataTable(d); - - var options = { - isStacked: true, - title: 'NetFlow bps of top flows', - hAxis: { - title: 'Time', - titleTextStyle: { - color: '#333' + $.ajax({ + type: "GET", + url: "/query?" 
+ query, + dataType: "text", + success: function(rdata, status, xhr) { + if (rdata == undefined) { + $("#chart_div").text("No data found") + return + } + renderChart(rdata) + }, + error: function(xhr) { + $("#chart_div").text(xhr.responseText) + } + }) +} + +function renderChart(rdata) { + pres = Papa.parse(rdata.trim()) + + var data = []; + for (var i = 0; i < pres.data.length; i++) { + for (var j = 0; j < pres.data[i].length; j++) { + if (j == 0) { + data[i] = []; + } + x = pres.data[i][j]; + if (i != 0) { + if (j != 0) { + x = parseInt(x) } - }, - vAxis: { - minValue: 0 } - }; + data[i][j] = x; + } + } - var chart = new google.visualization.AreaChart(document.getElementById('chart_div')); - chart.draw(data, options); - }); + data = google.visualization.arrayToDataTable(data); + + var options = { + isStacked: true, + title: 'NetFlow bps of top flows', + hAxis: { + title: 'Time', + titleTextStyle: { + color: '#333' + } + }, + vAxis: { + minValue: 0 + } + }; + + new google.visualization.AreaChart(document.getElementById('chart_div')).draw(data, options); +} + +// source: https://stackoverflow.com/a/26849194 +function parseParams(str) { + return str.split('&').reduce(function (params, param) { + var paramSplit = param.split('=').map(function (value) { + return decodeURIComponent(value.replace('+', ' ')); + }); + params[paramSplit[0]] = paramSplit[1]; + return params; + }, {}); } function populateForm() { - var q = $("#query").val(); - if (q == "" || q == "{}") { + var query = location.href.split("#")[1] + if (!query) { return; } - q = JSON.parse(q); - $("#topn").val(q.TopN); - for (var c in q['Cond']) { - var fieldNum = q['Cond'][c]['Field']; - var fieldName = fieldById[fieldNum]; - var operand = q['Cond'][c]['Operand']; - if (fieldNum == FieldRouter) { - operand = getRouterById(operand); - if (operand == null) { - return; - } - } else if (fieldNum == FieldIntIn || fieldNum == FieldIntOut) { - operand = getInterfaceById($("#Router").val(), operand); - if (operand == 
null) { - return; - } - } else if (fieldNum == FieldProtocol) { - operand = protocols[operand]; - if (operand == null) { - return; + var params = parseParams(query); + + for (var key in params) { + var value = params[key] + + if (key.match(/^Timestamp/)){ + timezoneOffset = (new Date()).getTimezoneOffset()*60 + value = formatTimestamp(new Date((parseInt(value) - timezoneOffset )*1000)) + } else if (key == "Breakdown") { + var breakdown = value.split(",") + for (var i in breakdown) { + $("#bd"+breakdown[i]).attr("checked", true) + continue } } - $("#" + fieldName).val(operand); + $("#" + key.replace(".","_")).val(value); } loadInterfaceOptions(); - - for (var f in q['Breakdown']) { - $("#bd" + f).prop( "checked", true ); - } } function loadInterfaceOptions() { - var rtr = $("#Router").val(); + var rtr = $("#Agent").val(); interfaces = []; - if (!rtrs[rtr]) { - return; - } - for (var k in rtrs[rtr]["interfaces"]) { - interfaces.push(rtrs[rtr]["interfaces"][k]); + for (var k in rtrs.Agents) { + if (rtrs.Agents[k].Name != rtr) { + continue + } + + for (var l in rtrs.Agents[k].Interfaces) { + interfaces.push(rtrs.Agents[k].Interfaces[l]); + } + } - $("#IntIn").autocomplete({ + $("#IntInName").autocomplete({ source: interfaces }); - $("#IntOut").autocomplete({ + $("#IntOutName").autocomplete({ source: interfaces }); } function loadProtocols() { - return $.get("/protocols", function(rdata) { - protocols = JSON.parse(rdata); + return $.getJSON("/protocols", function(data) { + protocols = data; for (var k in protocols) { - availableProtocols.push(protocols[k]); + availableProtocols.push(k); } $("#Protocol").autocomplete({ @@ -174,138 +151,84 @@ function loadProtocols() { }); } -function loadRouters() { - return $.get("/routers", function(rdata) { - rtrs = JSON.parse(rdata); - for (var k in rtrs) { - routers.push(k); +function loadAgents() { + return $.getJSON("/agents", function(data) { + rtrs = data; + for (var k in data.Agents) { + agents.push(data.Agents[k].Name); } - 
$("#Router").autocomplete({ - source: routers, + $("#Agent").autocomplete({ + source: agents, change: function() { loadInterfaceOptions(); } }); + }); } +function formatTimestamp(date) { + return date.toISOString().substr(0, 16) +} + $(document).ready(function() { - var start = new Date(((new Date() / 1000) - 900)* 1000).toISOString().substr(0, 16) - if ($("#TimeStart").val() == "") { - $("#TimeStart").val(start); + var start = formatTimestamp(new Date(((new Date() / 1000) - 900 - new Date().getTimezoneOffset() * 60)* 1000)); + if ($("#Timestamp_gt").val() == "") { + $("#Timestamp_gt").val(start); } - var end = new Date().toISOString().substr(0, 16) - if ($("#TimeEnd").val() == "") { - $("#TimeEnd").val(end); + var end = formatTimestamp(new Date(((new Date() / 1000) - new Date().getTimezoneOffset() * 60)* 1000)); + if ($("#Timestamp_lt").val() == "") { + $("#Timestamp_lt").val(end); } - $.when(loadRouters(), loadProtocols()).done(function() { - $("#Router").on('input', function() { + $.when(loadAgents(), loadProtocols()).done(function() { + $("#Agent").on('input', function() { loadInterfaceOptions(); }) populateForm(); }) - $("#submit").on('click', submitQuery); + $("form").on('submit', submitQuery); google.charts.load('current', { 'packages': ['corechart'] }); - google.charts.setOnLoadCallback(drawChart); -}); - -function getProtocolId(name) { - for (var k in protocols) { - if (protocols[k] == name) { - return k; - } - } - return null; -} - -function getIntId(rtr, name) { - if (!rtrs[rtr]) { - return null; - } - for (var k in rtrs[rtr]['interfaces']) { - if (rtrs[rtr]['interfaces'][k] == name) { - return k; - } - } - return null; -} - -function getRouterById(id) { - for (var k in rtrs) { - if (rtrs[k]['id'] == id) { - return k; - } + + window.onhashchange = function () { + populateForm() + google.charts.setOnLoadCallback(drawChart); } - return null; -} -function getInterfaceById(router, id) { - return rtrs[router]['interfaces'][id]; -} + 
google.charts.setOnLoadCallback(drawChart); +}); function submitQuery() { - var query = { - Cond: [], - Breakdown: {}, - TopN: parseInt($("#topn").val()) - }; + var breakdown = [] + var query = {}; - console.log($("#TimeStart").val()); - var start = new Date($("#TimeStart").val()); - var end = new Date($("#TimeEnd").val()); - start = Math.round(start.getTime() / 1000); - end = Math.round(end.getTime() / 1000); - query['Cond'].push({ - Field: FieldTimestamp, - Operator: OpGreater, - Operand: start + "" - }); - query['Cond'].push({ - Field: FieldTimestamp, - Operator: OpSmaller, - Operand: end + "" - }); + $(".in input").each(function(){ + var field = this.id.replace("_",".") + var value = this.value - for (var k in fields) { - tmp = $("#" + k).val(); - if (tmp == "") { - continue; + if (value == "") { + return; } - if (k == "Router") { - tmp = rtrs[tmp]['id']; - } else if (k == "IntIn" || k == "IntOut") { - tmp = getIntId($("#Router").val(), tmp) - if (tmp == null) { - return; - } - } else if (k == "Protocol") { - tmp = getProtocolId(tmp); - if (tmp == null) { - return; - } + + if (this.id.match(/^Timestamp/)){ + value = Math.round(new Date(value).getTime() / 1000); } - query['Cond'].push({ - Field: fields[k], - Operator: OpEqual, - Operand: tmp + "" - }); - } + query[field] = value + "" + }) - for (var i = 0; i < bdfields.length; i++) { - if (!$("#bd" + bdfields[i]).prop('checked')) { - continue; - } - query['Breakdown'][bdfields[i]] = true; + $(".bd input:checked").each(function(){ + breakdown.push(this.id.replace(/^bd/,"")); + }) + if (breakdown.length) { + query.Breakdown = breakdown.join(",") } - console.log(query); - $("#query").val(JSON.stringify(query)); - $("#form").submit(); + location.href = "#" + jQuery.param(query) + return false } \ No newline at end of file diff --git a/vendors/papaparse/papaparse.min.js b/vendors/papaparse/papaparse.min.js new file mode 100644 index 0000000..661e101 --- /dev/null +++ b/vendors/papaparse/papaparse.min.js @@ -0,0 +1,6 
@@ +/*! + Papa Parse + v4.3.2 + https://github.com/mholt/PapaParse +*/ +!function(a,b){"function"==typeof define&&define.amd?define([],b):"object"==typeof module&&module.exports?module.exports=b():a.Papa=b()}(this,function(){"use strict";function a(a,b){b=b||{};var c=b.dynamicTyping||!1;if(r(c)&&(b.dynamicTypingFunction=c,c={}),b.dynamicTyping=c,b.worker&&z.WORKERS_SUPPORTED){var h=k();return h.userStep=b.step,h.userChunk=b.chunk,h.userComplete=b.complete,h.userError=b.error,b.step=r(b.step),b.chunk=r(b.chunk),b.complete=r(b.complete),b.error=r(b.error),delete b.worker,void h.postMessage({input:a,config:b,workerId:h.id})}var i=null;return"string"==typeof a?i=b.download?new d(b):new f(b):a.readable===!0&&r(a.read)&&r(a.on)?i=new g(b):(t.File&&a instanceof File||a instanceof Object)&&(i=new e(b)),i.stream(a)}function b(a,b){function c(){"object"==typeof b&&("string"==typeof b.delimiter&&1===b.delimiter.length&&z.BAD_DELIMITERS.indexOf(b.delimiter)===-1&&(j=b.delimiter),("boolean"==typeof b.quotes||b.quotes instanceof Array)&&(h=b.quotes),"string"==typeof b.newline&&(k=b.newline),"string"==typeof b.quoteChar&&(l=b.quoteChar),"boolean"==typeof b.header&&(i=b.header))}function d(a){if("object"!=typeof a)return[];var b=[];for(var c in a)b.push(c);return b}function e(a,b){var c="";"string"==typeof a&&(a=JSON.parse(a)),"string"==typeof b&&(b=JSON.parse(b));var d=a instanceof Array&&a.length>0,e=!(b[0]instanceof Array);if(d&&i){for(var g=0;g0&&(c+=j),c+=f(a[g],g);b.length>0&&(c+=k)}for(var h=0;h0&&(c+=j);var n=d&&e?a[m]:m;c+=f(b[h][n],m)}h-1||" "===a.charAt(0)||" "===a.charAt(a.length-1);return c?l+a+l:a}function g(a,b){for(var c=0;c-1)return!0;return!1}var h=!1,i=!0,j=",",k="\r\n",l='"';c();var m=new RegExp(l,"g");if("string"==typeof a&&(a=JSON.parse(a)),a instanceof Array){if(!a.length||a[0]instanceof Array)return e(null,a);if("object"==typeof a[0])return e(d(a[0]),a)}else if("object"==typeof a)return"string"==typeof a.data&&(a.data=JSON.parse(a.data)),a.data instanceof 
Array&&(a.fields||(a.fields=a.meta&&a.meta.fields),a.fields||(a.fields=a.data[0]instanceof Array?a.fields:d(a.data[0])),a.data[0]instanceof Array||"object"==typeof a.data[0]||(a.data=[a.data])),e(a.fields||[],a.data||[]);throw"exception: Unable to serialize unrecognized input"}function c(a){function b(a){var b=p(a);b.chunkSize=parseInt(b.chunkSize),a.step||a.chunk||(b.chunkSize=null),this._handle=new h(b),this._handle.streamer=this,this._config=b}this._handle=null,this._paused=!1,this._finished=!1,this._input=null,this._baseIndex=0,this._partialLine="",this._rowCount=0,this._start=0,this._nextChunk=null,this.isFirstChunk=!0,this._completeResults={data:[],errors:[],meta:{}},b.call(this,a),this.parseChunk=function(a){if(this.isFirstChunk&&r(this._config.beforeFirstChunk)){var b=this._config.beforeFirstChunk(a);void 0!==b&&(a=b)}this.isFirstChunk=!1;var c=this._partialLine+a;this._partialLine="";var d=this._handle.parse(c,this._baseIndex,!this._finished);if(!this._handle.paused()&&!this._handle.aborted()){var e=d.meta.cursor;this._finished||(this._partialLine=c.substring(e-this._baseIndex),this._baseIndex=e),d&&d.data&&(this._rowCount+=d.data.length);var f=this._finished||this._config.preview&&this._rowCount>=this._config.preview;if(v)t.postMessage({results:d,workerId:z.WORKER_ID,finished:f});else if(r(this._config.chunk)){if(this._config.chunk(d,this._handle),this._paused)return;d=void 0,this._completeResults=void 0}return this._config.step||this._config.chunk||(this._completeResults.data=this._completeResults.data.concat(d.data),this._completeResults.errors=this._completeResults.errors.concat(d.errors),this._completeResults.meta=d.meta),!f||!r(this._config.complete)||d&&d.meta.aborted||this._config.complete(this._completeResults,this._input),f||d&&d.meta.paused||this._nextChunk(),d}},this._sendError=function(a){r(this._config.error)?this._config.error(a):v&&this._config.error&&t.postMessage({workerId:z.WORKER_ID,error:a,finished:!1})}}function d(a){function b(a){var 
b=a.getResponseHeader("Content-Range");return null===b?-1:parseInt(b.substr(b.lastIndexOf("/")+1))}a=a||{},a.chunkSize||(a.chunkSize=z.RemoteChunkSize),c.call(this,a);var d;u?this._nextChunk=function(){this._readChunk(),this._chunkLoaded()}:this._nextChunk=function(){this._readChunk()},this.stream=function(a){this._input=a,this._nextChunk()},this._readChunk=function(){if(this._finished)return void this._chunkLoaded();if(d=new XMLHttpRequest,this._config.withCredentials&&(d.withCredentials=this._config.withCredentials),u||(d.onload=q(this._chunkLoaded,this),d.onerror=q(this._chunkError,this)),d.open("GET",this._input,!u),this._config.downloadRequestHeaders){var a=this._config.downloadRequestHeaders;for(var b in a)d.setRequestHeader(b,a[b])}if(this._config.chunkSize){var c=this._start+this._config.chunkSize-1;d.setRequestHeader("Range","bytes="+this._start+"-"+c),d.setRequestHeader("If-None-Match","webkit-no-cache")}try{d.send()}catch(a){this._chunkError(a.message)}u&&0===d.status?this._chunkError():this._start+=this._config.chunkSize},this._chunkLoaded=function(){if(4==d.readyState){if(d.status<200||d.status>=400)return void this._chunkError();this._finished=!this._config.chunkSize||this._start>b(d),this.parseChunk(d.responseText)}},this._chunkError=function(a){var b=d.statusText||a;this._sendError(b)}}function e(a){a=a||{},a.chunkSize||(a.chunkSize=z.LocalChunkSize),c.call(this,a);var b,d,e="undefined"!=typeof FileReader;this.stream=function(a){this._input=a,d=a.slice||a.webkitSlice||a.mozSlice,e?(b=new FileReader,b.onload=q(this._chunkLoaded,this),b.onerror=q(this._chunkError,this)):b=new FileReaderSync,this._nextChunk()},this._nextChunk=function(){this._finished||this._config.preview&&!(this._rowCount=this._input.size,this.parseChunk(a.target.result)},this._chunkError=function(){this._sendError(b.error)}}function f(a){a=a||{},c.call(this,a);var b,d;this.stream=function(a){return b=a,d=a,this._nextChunk()},this._nextChunk=function(){if(!this._finished){var 
a=this._config.chunkSize,b=a?d.substr(0,a):d;return d=a?d.substr(a):"",this._finished=!d,this.parseChunk(b)}}}function g(a){a=a||{},c.call(this,a);var b=[],d=!0;this.stream=function(a){this._input=a,this._input.on("data",this._streamData),this._input.on("end",this._streamEnd),this._input.on("error",this._streamError)},this._nextChunk=function(){b.length?this.parseChunk(b.shift()):d=!0},this._streamData=q(function(a){try{b.push("string"==typeof a?a:a.toString(this._config.encoding)),d&&(d=!1,this.parseChunk(b.shift()))}catch(a){this._streamError(a)}},this),this._streamError=q(function(a){this._streamCleanUp(),this._sendError(a.message)},this),this._streamEnd=q(function(){this._streamCleanUp(),this._finished=!0,this._streamData("")},this),this._streamCleanUp=q(function(){this._input.removeListener("data",this._streamData),this._input.removeListener("end",this._streamEnd),this._input.removeListener("error",this._streamError)},this)}function h(a){function b(){if(x&&o&&(l("Delimiter","UndetectableDelimiter","Unable to auto-detect delimiting character; defaulted to '"+z.DefaultDelimiter+"'"),o=!1),a.skipEmptyLines)for(var b=0;b=w.length?"__parsed_extra":w[d]),g=f(e,g),"__parsed_extra"===e?(c[e]=c[e]||[],c[e].push(g)):c[e]=g}x.data[b]=c,a.header&&(d>w.length?l("FieldMismatch","TooManyFields","Too many fields: expected "+w.length+" fields but parsed "+d,b):d1&&(k+=Math.abs(o-f),f=o):f=o}m.data.length>0&&(l/=m.data.length),("undefined"==typeof e||k1.99&&(e=k,d=j)}return a.delimiter=d,{successful:!!d,bestDelimiter:d}}function j(a){a=a.substr(0,1048576);var b=a.split("\r"),c=a.split("\n"),d=c.length>1&&c[0].length=b.length/2?"\r\n":"\r"}function k(a){var b=q.test(a);return b?parseFloat(a):a}function l(a,b,c,d){x.errors.push({type:a,code:b,message:c,row:d})}var m,n,o,q=/^\s*-?(\d*\.?\d+|\d+\.?\d*)(e[-+]?\d+)?\s*$/i,s=this,t=0,u=!1,v=!1,w=[],x={data:[],errors:[],meta:{}};if(r(a.step)){var 
y=a.step;a.step=function(d){if(x=d,c())b();else{if(b(),0===x.data.length)return;t+=d.data.length,a.preview&&t>a.preview?n.abort():y(x,s)}}}this.parse=function(c,d,e){if(a.newline||(a.newline=j(c)),o=!1,a.delimiter)r(a.delimiter)&&(a.delimiter=a.delimiter(c),x.meta.delimiter=a.delimiter);else{var f=h(c,a.newline);f.successful?a.delimiter=f.bestDelimiter:(o=!0,a.delimiter=z.DefaultDelimiter),x.meta.delimiter=a.delimiter}var g=p(a);return a.preview&&a.header&&g.preview++,m=c,n=new i(g),x=n.parse(m,d,e),b(),u?{meta:{paused:!0}}:x||{meta:{paused:!1}}},this.paused=function(){return u},this.pause=function(){u=!0,n.abort(),m=m.substr(n.getCharIndex())},this.resume=function(){u=!1,s.streamer.parseChunk(m)},this.aborted=function(){return v},this.abort=function(){v=!0,n.abort(),x.meta.aborted=!0,r(a.complete)&&a.complete(x),m=""}}function i(a){a=a||{};var b=a.delimiter,c=a.newline,d=a.comments,e=a.step,f=a.preview,g=a.fastMode,h=a.quoteChar||'"';if(("string"!=typeof b||z.BAD_DELIMITERS.indexOf(b)>-1)&&(b=","),d===b)throw"Comment character same as delimiter";d===!0?d="#":("string"!=typeof d||z.BAD_DELIMITERS.indexOf(d)>-1)&&(d=!1),"\n"!=c&&"\r"!=c&&"\r\n"!=c&&(c="\n");var i=0,j=!1;this.parse=function(a,k,l){function m(a){x.push(a),A=i}function n(b){return l?p():("undefined"==typeof b&&(b=a.substr(i)),z.push(b),i=s,m(z),w&&q(),p())}function o(b){i=b,m(z),z=[],E=a.indexOf(c,i)}function p(a){return{data:x,errors:y,meta:{delimiter:b,linebreak:c,aborted:j,truncated:!!a,cursor:A+(k||0)}}}function q(){e(p()),x=[],y=[]}if("string"!=typeof a)throw"Input must be a string";var s=a.length,t=b.length,u=c.length,v=d.length,w=r(e);i=0;var x=[],y=[],z=[],A=0;if(!a)return p();if(g||g!==!1&&a.indexOf(h)===-1){for(var B=a.split(c),C=0;C=f)return x=x.slice(0,f),p(!0)}}return p()}for(var D=a.indexOf(b,i),E=a.indexOf(c,i),F=new RegExp(h+h,"g");;)if(a[i]!==h)if(d&&0===z.length&&a.substr(i,v)===d){if(E===-1)return p();i=E+u,E=a.indexOf(c,i),D=a.indexOf(b,i)}else if(D!==-1&&(D=f)return p(!0)}else{var 
G=i;for(i++;;){var G=a.indexOf(h,G+1);if(G===-1)return l||y.push({type:"Quotes",code:"MissingQuotes",message:"Quoted field unterminated",row:x.length,index:i}),n();if(G===s-1){var H=a.substring(i,G).replace(F,h);return n(H)}if(a[G+1]!==h){if(a[G+1]===b){z.push(a.substring(i,G).replace(F,h)),i=G+1+t,D=a.indexOf(b,i),E=a.indexOf(c,i);break}if(a.substr(G+1,u)===c){if(z.push(a.substring(i,G).replace(F,h)),o(G+1+u),D=a.indexOf(b,i),w&&(q(),j))return p();if(f&&x.length>=f)return p(!0);break}}else G++}}return n()},this.abort=function(){j=!0},this.getCharIndex=function(){return i}}function j(){var a=document.getElementsByTagName("script");return a.length?a[a.length-1].src:""}function k(){if(!z.WORKERS_SUPPORTED)return!1;if(!w&&null===z.SCRIPT_PATH)throw new Error("Script path cannot be determined automatically when Papa Parse is loaded asynchronously. You need to set Papa.SCRIPT_PATH manually.");var a=z.SCRIPT_PATH||s;a+=(a.indexOf("?")!==-1?"&":"?")+"papaworker";var b=new t.Worker(a);return b.onmessage=l,b.id=y++,x[b.id]=b,b}function l(a){var b=a.data,c=x[b.workerId],d=!1;if(b.error)c.userError(b.error,b.file);else if(b.results&&b.results.data){var e=function(){d=!0,m(b.workerId,{data:[],errors:[],meta:{aborted:!0}})},f={abort:e,pause:n,resume:n};if(r(c.userStep)){for(var g=0;g