diff --git a/.travis.yml b/.travis.yml
index 57cdfd5..82a25a7 100644
--- a/.travis.yml
+++ b/.travis.yml
@@ -9,6 +9,5 @@ go:
   - 1.4
 
 install:
-  - go get ./...
-  - go fmt
+  - make
   - go vet
diff --git a/Dockerfile b/Dockerfile
index 3eea7be..349dac0 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,19 +1,15 @@
-FROM golang:1.5.1-onbuild
+FROM golang:1.5.1
 MAINTAINER kpacha
 
-ENV MESOS_MASTER_HOST=leader.mesos
-ENV MESOS_MASTER_PORT=5050
-ENV MESOS_SLAVE_HOST=slave.mesos
-ENV MESOS_SLAVE_PORT=5051
-
-ENV MARATHON_HOST=marathon.mesos
-ENV MARATHON_PORT=8080
-
-ENV INFLUXDB_HOST=influxdb
-ENV INFLUXDB_PORT=8086
-ENV INFLUXDB_DB=mesos
 ENV INFLUXDB_USER=root
 ENV INFLUXDB_PWD=root
 
-ENV COLLECTOR_LAPSE=1
-ENV COLLECTOR_LIFETIME=1800
+RUN mkdir -p /go/src/github.com/kpacha/mesos-influxdb-collector
+COPY . /go/src/github.com/kpacha/mesos-influxdb-collector
+
+WORKDIR /go/src/github.com/kpacha/mesos-influxdb-collector
+RUN make install
+
+ENTRYPOINT ["/go/bin/mesos-influxdb-collector"]
+
+CMD ["-c", "conf.hcl"]
\ No newline at end of file
diff --git a/Dockerfile-min b/Dockerfile-min
new file mode 100644
index 0000000..2f9223f
--- /dev/null
+++ b/Dockerfile-min
@@ -0,0 +1,12 @@
+FROM busybox:1.24.1
+MAINTAINER kpacha
+
+ENV INFLUXDB_USER=root
+ENV INFLUXDB_PWD=root
+
+COPY ./mesos-influxdb-collector /mesos-influxdb-collector
+COPY ./conf.hcl /conf.hcl
+
+ENTRYPOINT ["/./mesos-influxdb-collector"]
+
+CMD ["-c", "/conf.hcl"]
diff --git a/Makefile b/Makefile
new file mode 100644
index 0000000..14f55f3
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,23 @@
+all: deps build test
+
+deps:
+	go get github.com/hashicorp/hcl
+	go get github.com/influxdb/influxdb/client
+
+gen:
+	go fmt ./...
+
+build:
+	sh -c 'CGO_ENABLED=0 go build -a --installsuffix cgo --ldflags="-s" -o mesos-influxdb-collector'
+
+install: all do_install
+
+do_install:
+	go install
+
+test:
+	go test -cover ./...
+
+docker:
+	docker run --rm -it -e "GOPATH=/go" -v "${PWD}:/go/src/github.com/kpacha/mesos-influxdb-collector" -w /go/src/github.com/kpacha/mesos-influxdb-collector golang:1.5.1 make
+	docker build -f Dockerfile-min -t kpacha/mesos-influxdb-collector:mesos-dns-min .
\ No newline at end of file
diff --git a/README.md b/README.md
index e5578fa..50f3b7f 100644
--- a/README.md
+++ b/README.md
@@ -3,7 +3,16 @@ mesos-influxdb-collector
 
 Lightweight mesos stats collector for influxdb
 
-Since this collector is intended to be deployed as a [marathon](https://mesosphere.github.io/marathon) app, it comes with a *lifetime* param. This defines how long the collector will run until it dies, so marathon will re-launch it, allowing easy allocation optimizations. Check the [marathon/](https://github.com/kpacha/mesos-influxdb-collector/tree/master/marathon) folder for more details on how to launch it.
+Since this collector is intended to be deployed as a [marathon](https://mesosphere.github.io/marathon) app, it comes with a *lifetime* param. This defines how long the collector will run until it dies, so marathon will re-launch it, allowing easy allocation optimizations. Check the [fixtures/marathon/](https://github.com/kpacha/mesos-influxdb-collector/tree/master/fixtures/marathon) folder for more details on how to launch it.
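Note: the *lifetime* mechanism above is worth spelling out. The process does its work on a ticker in the background and simply exits after a fixed period, so marathon reschedules it. A minimal, self-contained Go sketch of that pattern (names and durations here are illustrative, not taken from the diff):

```go
package main

import (
	"log"
	"time"
)

// runFor mimics the collector lifetime: collect on a ticker until the
// deadline passes, then stop so the scheduler (marathon) relaunches the task.
func runFor(dieAfter, lapse time.Duration, collect func()) {
	ticker := time.NewTicker(lapse)
	done := make(chan struct{})
	go func() {
		for {
			select {
			case <-ticker.C:
				collect()
			case <-done:
				return
			}
		}
	}()
	time.Sleep(dieAfter)
	ticker.Stop()
	close(done)
	log.Println("collector stopped")
}

func main() {
	runFor(5*time.Second, time.Second, func() { log.Println("collecting...") })
}
```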
+
+# Goals
+
++ Discover the mesos cluster through `mesos-dns`
++ Collect the mesos leader stats
++ Collect the mesos master stats
++ Collect the mesos slave stats
++ Collect the marathon master stats
++ Collect the chronos task stats (TODO)
 
 # Installing
 
@@ -19,27 +28,93 @@ Alternatively, if you have Go installed:
 
 ```
 $ go get github.com/kpacha/mesos-influxdb-collector
 ```
 
-# Running
+# Integration with `mesos-dns`
+
+The `mesos-influxdb-collector` is able to discover all your mesos nodes (masters and slaves) and the marathon master using the REST API exposed by the [mesos-dns](http://mesosphere.github.io/mesos-dns/) service. Check the next section for details.
+
+# Configuration
 
 The collector use these environmental vars:
 
-+ `INFLUXDB_DB`
-+ `INFLUXDB_HOST`
-+ `INFLUXDB_PORT`
 + `INFLUXDB_USER`
 + `INFLUXDB_PWD`
-+ `MESOS_MASTER_HOST`
-+ `MESOS_MASTER_PORT`
-+ `MESOS_SLAVE_HOST`
-+ `MESOS_SLAVE_PORT`
-+ `MARATHON_HOST`
-+ `MARATHON_PORT`
-+ `COLLECTOR_LAPSE`
-+ `COLLECTOR_LIFETIME`
+
+It also requires a config file with the list of nodes to monitor or the details of the `mesos-dns` service, along with these other params:
+
++ *Lapse*: time between consecutive collections. Default: 30 seconds
++ *DieAfter*: duration of the running instance. Default: 1 hour
+
+### MesosDNS
+
+Optional. Add it if you have a `mesos-dns` service running in your mesos cluster.
+
+```
+mesosDNS {
+  domain = "mesos"      // the domain used by the mesos-dns service
+  marathon = true       // resolve marathon master
+  host = "master.mesos" // host of the mesos-dns service
+  port = 8123           // port of the REST API
+}
+```
+
+### InfluxDB
+
+Required.
+
+```
+influxdb {
+  host = "influxdb.marathon.mesos" // host of the influxdb instance
+  port = 8086                      // port of the REST API
+  db = "mesos"                     // name of the database to use
+  checkLapse = 30                  // ping frequency
+}
+```
+
+### Mesos masters
+
+For manual definition of some (or all) mesos masters, use the `Master` struct:
+
+```
+Master {
+  host = "localhost"
+  port = 5051
+  leader = true // optional
+}
+Master {
+  host = "localhost"
+  port = 5052
+}
+```
+
+### Mesos slaves
+
+For manual definition of some (or all) mesos slaves, use the `Slave` struct:
+
+```
+Slave {
+  host = "localhost"
+  port = 5051
+}
+```
+
+### Marathon instances
+
+For manual definition of some (or all) marathon instances, use the `Marathon` struct:
+
+```
+Marathon {
+  host = "localhost"
+  port = 5052
+}
+```
+
+Check [`config/configuration_test.go`](https://github.com/kpacha/mesos-influxdb-collector/blob/master/config/configuration_test.go) and [`conf.hcl`](https://github.com/kpacha/mesos-influxdb-collector/blob/master/conf.hcl) for examples.
+
+# Running
 
 ## Dockerized version
 
-Run the container with the default params (check the Dockerfile and overwrite whatever you need):
+Run the container with the default params:
 
 ```
 $ docker pull --name mesos-influxdb-collector \
@@ -48,77 +123,27 @@ $ docker pull --name mesos-influxdb-collector \
   -it --rm kpacha/mesos-influxdb-collector
 ```
 
-Since the default value for `INFLUXDB_HOST` is `ìnfluxb`, you can link the collector to the influxdb container, dependeing on your environment.
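Note: the HCL blocks above are decoded straight into Go structs by the `config` package added later in this diff, via `hcl.Decode` from `github.com/hashicorp/hcl`. Key matching is case-insensitive, which is why both `dieAfter` and `dieafter` fill `DieAfter`. A minimal sketch of that round trip, using an illustrative two-field struct rather than the real `config.Config`:

```go
package main

import (
	"fmt"
	"log"

	"github.com/hashicorp/hcl"
)

// conf stands in for a slice of config.Config; the field names line up
// with the lapse/dieAfter keys shown in the README examples above.
type conf struct {
	Lapse    int
	DieAfter int
}

func main() {
	var c conf
	if err := hcl.Decode(&c, "lapse = 5\ndieAfter = 300"); err != nil {
		log.Fatal(err)
	}
	fmt.Printf("%+v\n", c) // prints: {Lapse:5 DieAfter:300}
}
```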
+If you need to customize something, copy the `conf.hcl`, make your changes and link it as a volume:
 
 ```
-$ docker run --name mesos-influxdb-collector \
-  --link influxdb \
-  -it --rm kpacha/mesos-influxdb-collector
+$ docker run --name mesos-influxdb-collector \
+  -v /path/to/my/custom/conf.hcl:/tmp/conf.hcl \
+  -it --rm kpacha/mesos-influxdb-collector -c /tmp/conf.hcl
 ```
 
+Tip: if you link your config file to `/go/src/github.com/kpacha/mesos-influxdb-collector/conf.hcl` you don't need to worry about that flag!
+
 ## Binary version
 
 ```
 $ ./mesos-influxdb-collector -h
 Usage of ./mesos-influxdb-collector:
-  -Id string
-        influxdb database (default "mesos")
-  -Ih string
-        influxdb host (default "localhost")
-  -Ip int
-        influxdb port (default 8086)
-  -Mmh string
-        mesos master host (default "localhost")
-  -Mmp int
-        mesos master port (default 5050)
-  -Msh string
-        mesos slave host (default "localhost")
-  -Msp int
-        mesos slave port (default 5051)
-  -d int
-        die after N seconds (default 300)
-  -l int
-        sleep time between collections in seconds (default 1)
-  -mh string
-        marathon host (default "localhost")
-  -mp int
-        marathon port (default 8080)
-```
-
-This is the relation between those params and the environmnetal variables listed above.
-
-Flag | EnvVar
---- | ------
-`Id` | `INFLUXDB_DB`
-`Ih` | `INFLUXDB_HOST`
-`Ip` | `INFLUXDB_PORT`
-`Mmh` | `MESOS_MASTER_HOST`
-`Mmp` | `MESOS_MASTER_PORT`
-`Msh` | `MESOS_SLAVE_HOST`
-`Msp` | `MESOS_SLAVE_PORT`
-`mh` | `MARATHON_HOST`
-`mp` | `MARATHON_PORT`
-`d` | `COLLECTOR_LAPSE`
-`l` | `COLLECTOR_LIFETIME`
+  -c string
+        path to the config file (default "conf.hcl")
+```
 
 The credentials for the influxdb database are accepted just as env_var (`INFLUXDB_USER` & `INFLUXDB_PWD`)
 
-## Testing environment
-
-In order to do a quick test of the collector, you can use one of the available mesos test environments: [playa-mesos](https://github.com/mesosphere/playa-mesos) & [mesoscope](https://github.com/schibsted/mesoscope). The other components can be deployed with public containers. Replace the `$DOCKER_IP` and `$MESOS_HOST` with the correct values. If you are running the mesoscope env, `MESOS_HOST=$DOCKER_IP`. For the playa-mesos option, `MESOS_HOST=10.141.141.10`.
-
-```
-$ docker run --name influxdb -p 8083:8083 -p 8086:8086 \
-    --expose 8090 --expose 8099 \
-    -d tutum/influxdb
-$ docker run --name grafana -p 3000:3000 \
-    -e GF_SERVER_ROOT_URL="http://$DOCKER_IP" \
-    -e GF_SECURITY_ADMIN_PASSWORD=secret \
-    -d grafana/grafana
-$ docker run --name mesos-influxdb-collector \
-    --link influxdb \
-    -e MESOS_HOST=$MESOS_HOST \
-    -it --rm kpacha/mesos-influxdb-collector
-```
+# Grafana dashboards
 
-The `grafana` folder contains several grafana dashboard definitions. Go to the grafana website (`http://$DOCKER_IP:3000/) and, after configuring the influxdb datasource, import them and start monitoring your mesos cluster.
+The `fixtures/grafana` folder contains several grafana dashboard definitions. Go to the grafana website and, after configuring the influxdb datasource, import them and start monitoring your mesos cluster.
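Note: since the influxdb credentials only travel through the environment, `main.go` resolves them with a lookup-plus-fallback helper. The helper's body sits outside this diff, so the sketch below is an assumption about its shape rather than a copy:

```go
package main

import (
	"fmt"
	"os"
)

// getStringParam returns the env var when set and the default otherwise,
// the pattern main.go uses for INFLUXDB_USER and INFLUXDB_PWD (both
// defaulting to "root").
func getStringParam(envName, defaultValue string) string {
	if value := os.Getenv(envName); value != "" {
		return value
	}
	return defaultValue
}

func main() {
	fmt.Println(getStringParam("INFLUXDB_USER", "root"))
}
```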
diff --git a/collector.go b/collector.go
deleted file mode 100644
index a22e1a2..0000000
--- a/collector.go
+++ /dev/null
@@ -1,34 +0,0 @@
-package main
-
-import (
-	"fmt"
-	"github.com/kpacha/mesos-influxdb-collector/collector"
-	"github.com/kpacha/mesos-influxdb-collector/parser/marathon"
-	"github.com/kpacha/mesos-influxdb-collector/parser/mesos"
-	"log"
-	"net/url"
-)
-
-func NewMesosMasterCollector(host string, port int) collector.Collector {
-	u, err := url.Parse(fmt.Sprintf("http://%s:%d/metrics/snapshot", host, port))
-	if err != nil {
-		log.Fatal("Error building the mesos master collector:", err)
-	}
-	return collector.UrlCollector{Url: u.String(), Parser: mesos.MasterParser{Node: host}}
-}
-
-func NewMesosSlaveCollector(host string, port int) collector.Collector {
-	u, err := url.Parse(fmt.Sprintf("http://%s:%d/metrics/snapshot", host, port))
-	if err != nil {
-		log.Fatal("Error building the mesos slave collector:", err)
-	}
-	return collector.UrlCollector{Url: u.String(), Parser: mesos.SlaveParser{Node: host}}
-}
-
-func NewMarathonCollector(host string, port int) collector.Collector {
-	u, err := url.Parse(fmt.Sprintf("http://%s:%d/metrics", host, port))
-	if err != nil {
-		log.Fatal("Error building the marathon collector:", err)
-	}
-	return collector.UrlCollector{Url: u.String(), Parser: marathon.MarathonParser{Node: host}}
-}
diff --git a/collector/collector.go b/collector/collector.go
index 776a6ae..5656e22 100644
--- a/collector/collector.go
+++ b/collector/collector.go
@@ -15,10 +15,6 @@ type MultiCollector struct {
 	Collectors []Collector
 }
 
-func NewMultiCollector(collectors []Collector) Collector {
-	return MultiCollector{collectors}
-}
-
 func (mc MultiCollector) Collect() ([]store.Point, error) {
 	var data []store.Point
 	for _, c := range mc.Collectors {
diff --git a/collector/factory.go b/collector/factory.go
new file mode 100644
index 0000000..f8b4a81
--- /dev/null
+++ b/collector/factory.go
@@ -0,0 +1,60 @@
+package collector
+
+import (
+	"fmt"
+	"github.com/kpacha/mesos-influxdb-collector/config"
+	"github.com/kpacha/mesos-influxdb-collector/parser/marathon"
+	"github.com/kpacha/mesos-influxdb-collector/parser/mesos"
+	"log"
+	"net/url"
+)
+
+func NewCollectorFromConfig(configuration *config.Config) Collector {
+	var collectors []Collector
+
+	for _, master := range configuration.Master {
+		collectors = append(collectors, NewMesosMasterCollector(master.Host, master.Port, master.Leader))
+	}
+	for _, slave := range configuration.Slave {
+		collectors = append(collectors, NewMesosSlaveCollector(slave.Host, slave.Port))
+	}
+	for _, marathonInstance := range configuration.Marathon {
+		collectors = append(collectors, NewMarathonCollector(marathonInstance.Host, marathonInstance.Port))
+	}
+
+	log.Println("Total collectors created:", len(collectors))
+
+	return NewMultiCollector(collectors)
+}
+
+func NewMultiCollector(collectors []Collector) Collector {
+	return MultiCollector{collectors}
+}
+
+func NewMesosLeaderCollector(host string, port int) Collector {
+	return NewMesosMasterCollector(host, port, true)
+}
+
+func NewMesosMasterCollector(host string, port int, leader bool) Collector {
+	u, err := url.Parse(fmt.Sprintf("http://%s:%d/metrics/snapshot", host, port))
+	if err != nil {
+		log.Fatal("Error building the mesos master collector:", err)
+	}
+	return UrlCollector{Url: u.String(), Parser: mesos.MasterParser{Node: host, Leader: leader}}
+}
+
+func NewMesosSlaveCollector(host string, port int) Collector {
+	u, err := url.Parse(fmt.Sprintf("http://%s:%d/metrics/snapshot", host, port))
+	if err != nil {
+		log.Fatal("Error building the mesos slave collector:", err)
+	}
+	return UrlCollector{Url: u.String(), Parser: mesos.SlaveParser{Node: host}}
+}
+
+func NewMarathonCollector(host string, port int) Collector {
+	u, err := url.Parse(fmt.Sprintf("http://%s:%d/metrics", host, port))
+	if err != nil {
+		log.Fatal("Error building the marathon collector:", err)
+	}
+	return UrlCollector{Url: u.String(), Parser: marathon.MarathonParser{Node: host}}
+}
diff --git a/collector/factory_test.go b/collector/factory_test.go
new file mode 100644
index 0000000..14878ea
--- /dev/null
+++ b/collector/factory_test.go
@@ -0,0 +1,33 @@
+package collector
+
+import (
+	"fmt"
+	"github.com/kpacha/mesos-influxdb-collector/config"
+)
+
+func ExampleCollectorFromConfig() {
+	txtConfig := `Master {
+		host = "localhost"
+		port = 5050
+		leader = true
+	}
+	Master {
+		host = "localhost"
+		port = 15050
+	}
+	Slave {
+		host = "localhost"
+		port = 5051
+	}
+	Slave {
+		host = "localhost"
+		port = 5052
+	}`
+	cp := config.ConfigParser{}
+	c, _ := cp.ParseConfig(txtConfig)
+	collector := NewCollectorFromConfig(c)
+	fmt.Println(collector)
+
+	// Output:
+	// {[{http://localhost:5050/metrics/snapshot {localhost true}} {http://localhost:15050/metrics/snapshot {localhost false}} {http://localhost:5051/metrics/snapshot {localhost}} {http://localhost:5052/metrics/snapshot {localhost}}]}
+}
diff --git a/conf.hcl b/conf.hcl
new file mode 100644
index 0000000..0475077
--- /dev/null
+++ b/conf.hcl
@@ -0,0 +1,14 @@
+mesosDNS {
+  domain = "mesos"
+  marathon = true
+  host = "master.mesos"
+  port = 8123
+}
+influxdb {
+  host = "influxdb.marathon.mesos"
+  port = 8086
+  db = "mesos"
+  checkLapse = 30
+}
+lapse=5
+dieAfter = 300
diff --git a/config/configuration.go b/config/configuration.go
new file mode 100644
index 0000000..ed60adb
--- /dev/null
+++ b/config/configuration.go
@@ -0,0 +1,93 @@
+package config
+
+import (
+	"log"
+
+	"github.com/hashicorp/hcl"
+	"io/ioutil"
+)
+
+type Config struct {
+	MesosDNS *MesosDNS
+	Master   []Master
+	Slave    []Server
+	Marathon []Server
+	InfluxDB *InfluxDB
+	Lapse    int
+	DieAfter int
+}
+
+type MesosDNS struct {
+	Domain   string
+	Marathon bool
+	Host     string
+	Port     int
+}
+
+type Server struct {
+	Host string
+	Port int
+}
+
+type Master struct {
+	Host   string
+	Port   int
+	Leader bool
+}
+
+type InfluxDB struct {
+	Host       string
+	Port       int
+	DB         string
+	CheckLapse int
+}
+
+type ConfigParser struct {
+	Path     string
+	AllowDNS bool
+}
+
+func (cp ConfigParser) Parse() (*Config, error) {
+	return cp.ParseConfigFile(cp.Path)
+}
+
+func (cp ConfigParser) ParseConfigFile(file string) (*Config, error) {
+	hclText, err := ioutil.ReadFile(file)
+	if err != nil {
+		return nil, err
+	}
+
+	return cp.ParseConfig(string(hclText))
+}
+
+func (cp ConfigParser) ParseConfig(hclText string) (*Config, error) {
+	result := &Config{}
+
+	if err := hcl.Decode(&result, hclText); err != nil {
+		return nil, err
+	}
+
+	if result.Lapse == 0 {
+		result.Lapse = 30
+	}
+	if result.DieAfter == 0 {
+		result.DieAfter = 3600
+	}
+
+	if result.InfluxDB == nil {
+		result.InfluxDB = &InfluxDB{"localhost", 8086, "mesos", 30}
+	}
+	if result.InfluxDB.CheckLapse == 0 {
+		result.InfluxDB.CheckLapse = 30
+	}
+
+	if cp.AllowDNS && result.MesosDNS != nil {
+		if _, err := NewDNSResolver(result); err != nil {
+			return nil, err
+		}
+	}
+
+	log.Printf("%+v\n", result)
+
+	return result, nil
+}
diff --git a/config/configuration_test.go b/config/configuration_test.go
new file mode 100644
index 0000000..bf83847
--- /dev/null
+++ b/config/configuration_test.go
@@ -0,0 +1,119 @@
+package config
+
+import (
+	"fmt"
+)
+
+func ExampleParseMaster() {
+	txtConfig := `Master {
+		host = "localhost"
+		port = 5051
+		leader = true
+	}
+	Master {
+		host = "localhost"
+		port = 5052
+	}
+	dieafter = 1`
+	cp := ConfigParser{}
+	c, _ := cp.ParseConfig(txtConfig)
+	fmt.Println(c.MesosDNS)
+	fmt.Println(c.Master)
+	fmt.Println(c.Slave)
+	fmt.Println(c.Marathon)
+	fmt.Println(c.InfluxDB)
+	fmt.Println(c.Lapse)
+	fmt.Println(c.DieAfter)
+	// Output:
+	// <nil>
+	// [{localhost 5051 true} {localhost 5052 false}]
+	// []
+	// []
+	// &{localhost 8086 mesos 30}
+	// 30
+	// 1
+}
+
+func ExampleParseSlave() {
+	txtConfig := `Slave {
+		host = "localhost"
+		port = 5051
+	}
+	Slave {
+		host = "localhost"
+		port = 5052
+	}
+	lapse=100
+	dieAfter = 1`
+	cp := ConfigParser{}
+	c, _ := cp.ParseConfig(txtConfig)
+	fmt.Println(c.MesosDNS)
+	fmt.Println(c.Master)
+	fmt.Println(c.Slave)
+	fmt.Println(c.Marathon)
+	fmt.Println(c.InfluxDB)
+	fmt.Println(c.Lapse)
+	fmt.Println(c.DieAfter)
+	// Output:
+	// <nil>
+	// []
+	// [{localhost 5051} {localhost 5052}]
+	// []
+	// &{localhost 8086 mesos 30}
+	// 100
+	// 1
+}
+
+func ExampleParseMarathon() {
+	txtConfig := `Marathon {
+		host = "localhost"
+		port = 5051
+	}
+	Marathon {
+		host = "localhost"
+		port = 5052
+	}
+	DieAfter = 1`
+	cp := ConfigParser{}
+	c, _ := cp.ParseConfig(txtConfig)
+	fmt.Println(c.MesosDNS)
+	fmt.Println(c.Master)
+	fmt.Println(c.Slave)
+	fmt.Println(c.Marathon)
+	fmt.Println(c.InfluxDB)
+	fmt.Println(c.Lapse)
+	fmt.Println(c.DieAfter)
+	// Output:
+	// <nil>
+	// []
+	// []
+	// [{localhost 5051} {localhost 5052}]
+	// &{localhost 8086 mesos 30}
+	// 30
+	// 1
+}
+
+func ExampleParseInfluxDB() {
+	txtConfig := `influxdb {
+		host = "influx"
+		port = 18086
+		db = "custom"
+	}`
+	cp := ConfigParser{}
+	c, _ := cp.ParseConfig(txtConfig)
+	fmt.Println(c.MesosDNS)
+	fmt.Println(c.Master)
+	fmt.Println(c.Slave)
+	fmt.Println(c.Marathon)
+	fmt.Println(c.InfluxDB)
+	fmt.Println(c.Lapse)
+	fmt.Println(c.DieAfter)
+	// Output:
+	// <nil>
+	// []
+	// []
+	// []
+	// &{influx 18086 custom 30}
+	// 30
+	// 3600
+}
diff --git a/config/dns.go b/config/dns.go
new file mode 100644
index 0000000..df081d5
--- /dev/null
+++ b/config/dns.go
@@ -0,0 +1,112 @@
+package config
+
+import (
+	"encoding/json"
+	"fmt"
+	"log"
+
+	"github.com/kpacha/mesos-influxdb-collector/reader"
+)
+
+type ARecord struct {
+	Host string `json:"host"`
+	IP   string `json:"ip"`
+}
+
+type DNSResolver struct {
+	Config *Config
+}
+
+func NewDNSResolver(config *Config) (*DNSResolver, error) {
+	resolver := DNSResolver{config}
+	if err := resolver.resolveMesosMasters(); err != nil {
+		log.Println("Error resolving MesosMasters")
+		return nil, err
+	}
+	if err := resolver.resolveMesosSlaves(); err != nil {
+		log.Println("Error resolving MesosSlaves")
+		return nil, err
+	}
+	if err := resolver.resolveMarathon(); err != nil {
+		log.Println("Error resolving Marathon")
+		return nil, err
+	}
+	return &resolver, nil
+}
+
+func (r DNSResolver) resolveMesosMasters() error {
+	leaders, err := r.getARecords(r.getMesosLeaderUrl())
+	if err != nil {
+		return err
+	}
+	masters, err := r.getARecords(r.getMesosMasterUrl())
+	if err != nil {
+		return err
+	}
+
+	log.Println("leaders", leaders)
+	log.Println("masters", masters)
+
+	for _, master := range masters {
+		r.Config.Master = append(r.Config.Master, Master{master.IP, 5050, master.IP == leaders[0].IP})
+	}
+
+	return nil
+}
+
+func (r DNSResolver) resolveMesosSlaves() error {
+	slaves, err := r.getARecords(r.getMesosSlaveUrl())
+	if err != nil {
+		return err
+	}
+
+	for _, slave := range slaves {
+		r.Config.Slave = append(r.Config.Slave, Server{slave.IP, 5051})
+	}
+
+	return nil
+}
+
+func (r DNSResolver) resolveMarathon() error {
+	instances, err := r.getARecords(r.getMarathonUrl())
+	if err != nil {
+		return err
+	}
+
+	for _, instance := range instances {
+		r.Config.Marathon = append(r.Config.Marathon, Server{instance.IP, 8080})
+	}
+
+	return nil
+}
+
+func (r DNSResolver) getARecords(url string) ([]ARecord, error) {
+	var instances []ARecord
+	body, err := reader.ReadUrl(url)
+	if err != nil {
+		return instances, err
+	}
+
+	err = json.Unmarshal(body, &instances)
+	return instances, err
+}
+
+func (r DNSResolver) getMesosMasterUrl() string {
+	return r.getUrl("v1/hosts/master")
+}
+
+func (r DNSResolver) getMesosLeaderUrl() string {
+	return r.getUrl("v1/hosts/leader")
+}
+
+func (r DNSResolver) getMesosSlaveUrl() string {
+	return r.getUrl("v1/hosts/slave")
+}
+
+func (r DNSResolver) getMarathonUrl() string {
+	return r.getUrl("v1/hosts/marathon")
+}
+
+func (r DNSResolver) getUrl(partial string) string {
+	return fmt.Sprintf("http://%s:%d/%s.%s.", r.Config.MesosDNS.Host, r.Config.MesosDNS.Port, partial, r.Config.MesosDNS.Domain)
+}
diff --git a/grafana/Home-1444161125840.json b/fixtures/grafana/Home-1444161125840.json
similarity index 100%
rename from grafana/Home-1444161125840.json
rename to fixtures/grafana/Home-1444161125840.json
diff --git a/grafana/Mesos Frameworks-1444161166910.json b/fixtures/grafana/Mesos Frameworks-1444161166910.json
similarity index 100%
rename from grafana/Mesos Frameworks-1444161166910.json
rename to fixtures/grafana/Mesos Frameworks-1444161166910.json
diff --git a/grafana/Mesos Messages-1444161170686.json b/fixtures/grafana/Mesos Messages-1444161170686.json
similarity index 100%
rename from grafana/Mesos Messages-1444161170686.json
rename to fixtures/grafana/Mesos Messages-1444161170686.json
diff --git a/grafana/Mesos Resources-1444161175566.json b/fixtures/grafana/Mesos Resources-1444161175566.json
similarity index 100%
rename from grafana/Mesos Resources-1444161175566.json
rename to fixtures/grafana/Mesos Resources-1444161175566.json
diff --git a/grafana/Mesos Slaves-1444161180086.json b/fixtures/grafana/Mesos Slaves-1444161180086.json
similarity index 100%
rename from grafana/Mesos Slaves-1444161180086.json
rename to fixtures/grafana/Mesos Slaves-1444161180086.json
diff --git a/grafana/Mesos Tasks-1444161186342.json b/fixtures/grafana/Mesos Tasks-1444161186342.json
similarity index 100%
rename from grafana/Mesos Tasks-1444161186342.json
rename to fixtures/grafana/Mesos Tasks-1444161186342.json
diff --git a/grafana/Mesos-1444161161537.json b/fixtures/grafana/Mesos-1444161161537.json
similarity index 100%
rename from grafana/Mesos-1444161161537.json
rename to fixtures/grafana/Mesos-1444161161537.json
diff --git a/fixtures/marathon/README.md b/fixtures/marathon/README.md
new file mode 100644
index 0000000..37076a5
--- /dev/null
+++ b/fixtures/marathon/README.md
@@ -0,0 +1,110 @@
+Running the mesos-influxdb-collector on marathon
+====
+
+The easiest way to deploy the mesos-influxdb-collector in your cluster is by setting the `instances` value to `2` and forcing the scheduler to deploy them in different slaves.
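Note: every lookup in the `config/dns.go` resolver above reduces to one GET against the mesos-dns REST API plus a JSON decode of the returned A-records. A self-contained sketch of that round trip for the master lookup; the host, port and domain are illustrative values that would normally come from the `mesosDNS` config block:

```go
package main

import (
	"encoding/json"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
)

// ARecord mirrors the entries returned by the mesos-dns /v1/hosts endpoints.
type ARecord struct {
	Host string `json:"host"`
	IP   string `json:"ip"`
}

func main() {
	// Same URL shape as DNSResolver.getUrl: http://<host>:<port>/v1/hosts/master.<domain>.
	resp, err := http.Get("http://master.mesos:8123/v1/hosts/master.mesos.")
	if err != nil {
		log.Fatal(err)
	}
	defer resp.Body.Close()

	body, err := ioutil.ReadAll(resp.Body)
	if err != nil {
		log.Fatal(err)
	}

	var masters []ARecord
	if err := json.Unmarshal(body, &masters); err != nil {
		log.Fatal(err)
	}
	fmt.Println(masters) // one entry per mesos master
}
```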
+
+## Create a marathon app definition
+
+Fix the [mesos-influxdb-collector.json](https://github.com/kpacha/mesos-influxdb-collector/blob/master/fixtures/marathon/mesos-influxdb-collector.json) file. The critical params to customize are `MESOS_HOST` and `INFLUXDB_HOST`.
+
+## Deploy it!
+
+After updating the app definition, send it to your marathon instance (once again, replace the `$MARATHON_HOST` & `$MARATHON_PORT` with the right values for your environment)
+
+```
+$ curl -iH'Content-Type: application/json' -XPUT \
+    -d@mesos-influxdb-collector.json \
+    http://$MARATHON_HOST:$MARATHON_PORT/v2/apps/mesos-influxdb-collector
+```
+
+curl -iXPUT -H"Content-Type: application/json" 172.28.128.3:8080/v2/apps/mesos-influxdb-collector -d'{
+  "id": "mesos-influxdb-collector",
+  "cpus": 0.1,
+  "mem": 64.0,
+  "instances": 2,
+  "container": {
+    "type": "DOCKER",
+    "docker": {
+      "image": "kpacha/mesos-influxdb-collector"
+    }
+  },
+  "env": {
+    "MESOS_HOST": "172.17.42.1",
+    "INFLUXDB_HOST": "$HOST",
+    "COLLECTOR_LIFETIME": "300"
+  },
+  "constraints": [
+    ["hostname", "UNIQUE"]
+  ],
+  "backoffSeconds": 1,
+  "backoffFactor": 1.15,
+  "maxLaunchDelaySeconds": 300,
+  "upgradeStrategy": {
+    "minimumHealthCapacity": 1,
+    "maximumOverCapacity": 1
+  }
+}'
+
+curl -iXPUT -H"Content-Type: application/json" 172.28.128.3:8080/v2/apps/influxdb -d'{
+  "id": "influxdb",
+  "cpus": 0.4,
+  "mem": 400.0,
+  "instances": 1,
+  "container": {
+    "type": "DOCKER",
+    "docker": {
+      "image": "tutum/influxdb",
+      "network": "BRIDGE",
+      "portMappings": [
+        { "containerPort": 8083, "hostPort": 0, "servicePort": 8083, "protocol": "tcp" },
+        { "containerPort": 8086, "hostPort": 0, "servicePort": 8086, "protocol": "tcp" }
+      ]
+    }
+  },
+  "env": {
+    "MESOS_HOST": "10.0.2.15",
+    "INFLUXDB_HOST": "10.0.2.15",
+    "COLLECTOR_LIFETIME": "300"
+  },
+  "constraints": [
+    ["hostname", "UNIQUE"]
+  ],
+  "backoffSeconds": 1,
+  "backoffFactor": 1.15,
+  "maxLaunchDelaySeconds": 300,
+  "upgradeStrategy": {
+    "minimumHealthCapacity": 1,
+    "maximumOverCapacity": 1
+  }
+}'
+
+curl -iXPUT -H"Content-Type: application/json" 172.28.128.3:8080/v2/apps/grafana -d'{
+  "id": "grafana",
+  "cpus": 0.2,
+  "mem": 400.0,
+  "instances": 1,
+  "container": {
+    "type": "DOCKER",
+    "docker": {
+      "image": "grafana/grafana",
+      "network": "BRIDGE",
+      "portMappings": [
+        { "containerPort": 3000, "hostPort": 0, "servicePort": 3000, "protocol": "tcp" }
+      ]
+    }
+  },
+  "env": {
+    "INFLUXDB_HOST": "10.0.2.15",
+    "COLLECTOR_LIFETIME": "300"
+  },
+  "constraints": [
+    ["hostname", "UNIQUE"]
+  ],
+  "backoffSeconds": 1,
+  "backoffFactor": 1.15,
+  "maxLaunchDelaySeconds": 300,
+  "upgradeStrategy": {
+    "minimumHealthCapacity": 1,
+    "maximumOverCapacity": 1
+  }
+}'
\ No newline at end of file
diff --git a/marathon/mesos-influxdb-collector.json b/fixtures/marathon/mesos-influxdb-collector.json
similarity index 100%
rename from marathon/mesos-influxdb-collector.json
rename to fixtures/marathon/mesos-influxdb-collector.json
diff --git a/main.go b/main.go
index 67da6f3..d9d881d 100644
--- a/main.go
+++ b/main.go
@@ -3,6 +3,7 @@ package main
 import (
 	"flag"
 	"github.com/kpacha/mesos-influxdb-collector/collector"
+	"github.com/kpacha/mesos-influxdb-collector/config"
 	"github.com/kpacha/mesos-influxdb-collector/store"
 	"log"
 	"os"
@@ -11,71 +12,41 @@ import (
 )
 
 const (
-	InfluxdbHost    = "localhost"
-	InfluxdbPort    = 8086
-	InfluxdbDB      = "mesos"
-	InfluxdbUser    = "root"
-	InfluxdbPass    = "root"
-	MesosMasterHost = "localhost"
-	MesosMasterPort = 5050
-	MesosSlaveHost  = "localhost"
-	MesosSlavePort  = 5051
-	MarathonHost    = "localhost"
-	MarathonPort    = 8080
-	DefaultLapse    = 1
-	DefaultLifeTime = 300
-	DefaultLogLapse = 30
+	ConfigPath   = "conf.hcl"
+	InfluxdbUser = "root"
+	InfluxdbPass = "root"
 
-	InfluxdbEnvName        = "INFLUXDB_HOST"
-	InfluxdbDBEnvName      = "INFLUXDB_DB"
-	InfluxdbPortEnvName    = "INFLUXDB_PORT"
-	InfluxdbUserEnvName    = "INFLUXDB_USER"
-	InfluxdbPassEnvName    = "INFLUXDB_PWD"
-	MesosMasterHostEnvName = "MESOS_MASTER_HOST"
-	MesosMasterPortEnvName = "MESOS_MASTER_PORT"
-	MesosSlaveHostEnvName  = "MESOS_SLAVE_HOST"
-	MesosSlavePortEnvName  = "MESOS_SLAVE_PORT"
-	MarathonHostEnvName    = "MARATHON_HOST"
-	MarathonPortEnvName    = "MARATHON_PORT"
-	LapseEnvName           = "COLLECTOR_LAPSE"
-	LifeTimeEnvName        = "COLLECTOR_LIFETIME"
+	ConfigPathEnvName   = "CONFIG_FILE"
+	InfluxdbUserEnvName = "INFLUXDB_USER"
+	InfluxdbPassEnvName = "INFLUXDB_PWD"
 )
 
 func main() {
-	ihost := flag.String("Ih", getStringParam(InfluxdbEnvName, InfluxdbHost), "influxdb host")
-	iport := flag.Int("Ip", getIntParam(InfluxdbPortEnvName, InfluxdbPort), "influxdb port")
-	idb := flag.String("Id", getStringParam(InfluxdbDBEnvName, InfluxdbDB), "influxdb database")
-	mmhost := flag.String("Mmh", getStringParam(MesosMasterHostEnvName, MesosMasterHost), "mesos master host")
-	mmport := flag.Int("Mmp", getIntParam(MesosMasterPortEnvName, MesosMasterPort), "mesos master port")
-	mshost := flag.String("Msh", getStringParam(MesosSlaveHostEnvName, MesosSlaveHost), "mesos slave host")
-	msport := flag.Int("Msp", getIntParam(MesosSlavePortEnvName, MesosSlavePort), "mesos slave port")
-	marathonHost := flag.String("mh", getStringParam(MarathonHostEnvName, MarathonHost), "marathon host")
-	marathonPort := flag.Int("mp", getIntParam(MarathonPortEnvName, MarathonPort), "marathon port")
-	lapse := flag.Int("l", getIntParam(LapseEnvName, DefaultLapse), "sleep time between collections in seconds")
-	dieAfter := flag.Int("d", getIntParam(LifeTimeEnvName, DefaultLifeTime), "die after N seconds")
+	configPath := flag.String("c", ConfigPath, "path to the config file")
 	flag.Parse()
 
-	influxdb := store.NewInfluxdb(store.InfluxdbConfig{
-		Host:       *ihost,
-		Port:       *iport,
-		DB:         *idb,
-		Username:   getStringParam(InfluxdbUserEnvName, InfluxdbUser),
-		Password:   getStringParam(InfluxdbPassEnvName, InfluxdbPass),
-		CheckLapse: DefaultLogLapse,
-	})
+	cp := config.ConfigParser{
+		Path:     *configPath,
+		AllowDNS: true,
+	}
+
+	conf, err := cp.Parse()
+	if err != nil {
+		log.Println("Error parsing config file:", err.Error())
+		return
+	}
+	col := collector.NewCollectorFromConfig(conf)
 
-	col := collector.NewMultiCollector(
-		[]collector.Collector{
-			NewMesosMasterCollector(*mmhost, *mmport),
-			NewMesosSlaveCollector(*mshost, *msport),
-			NewMarathonCollector(*marathonHost, *marathonPort),
-		})
+	influxdb := store.NewInfluxdbFromConfig(
+		conf,
+		getStringParam(InfluxdbUserEnvName, InfluxdbUser),
+		getStringParam(InfluxdbPassEnvName, InfluxdbPass))
 
-	subscription := NewCollectorSubscription(lapse, &col, &influxdb)
+	subscription := NewCollectorSubscription(&conf.Lapse, &col, &influxdb)
 
-	go report(&subscription)
+	go report(&subscription, conf.InfluxDB.CheckLapse)
 
-	time.Sleep(time.Second * time.Duration(*dieAfter))
+	time.Sleep(time.Second * time.Duration(conf.DieAfter))
 
 	subscription.Cancel()
 	log.Println("Mesos collector stopped")
@@ -98,8 +69,8 @@ func getIntParam(envName string, defaultValue int) int {
 	return env
 }
 
-func report(subscription *Subscription) {
-	ticker := time.NewTicker(time.Second * time.Duration(DefaultLogLapse))
+func report(subscription *Subscription, lapse int) {
+	ticker := time.NewTicker(time.Second * time.Duration(lapse))
 	var collects int
 	for _ = range ticker.C {
 		collects = <-subscription.Stats
diff --git a/marathon/README.md b/marathon/README.md
deleted file mode 100644
index 5a3f483..0000000
--- a/marathon/README.md
+++ /dev/null
@@ -1,18 +0,0 @@
-Running the mesos-influxdb-collector on marathon
-====
-
-The easiest way to deploy the mesos-influxdb-collector in your cluster is by setting the `instance` value to `2` and forcing the scheduller to deploy them in different slaves.
-
-## Create a marathon app definition
-
-Fix the [mesos-influxdb-collector.json](https://github.com/kpacha/mesos-influxdb-collector/blob/master/marathon/mesos-influxdb-collector.json) file. The critical params to customize are `MESOS_HOST` and `INFLUXDB_HOST.
-
-## Deploy it!
-
-After updating the app definition, send it to your marathon instance (once again, replace the `$MARATHON_HOST` & `$MARATHON_PORT` with the right value for your environment)
-
-```
-$ curl -iH'Content-Type: application/json' -XPUT \
-    -d@mesos-influxdb-collector.json \
-    http://$MARATHON_HOST:$MARATHON_PORT/v2/apps/mesos-influxdb-collector
-```
diff --git a/parser/mesos/master.go b/parser/mesos/master.go
index 4b6a772..c514f17 100644
--- a/parser/mesos/master.go
+++ b/parser/mesos/master.go
@@ -10,7 +10,8 @@ import (
 )
 
 type MasterParser struct {
-	Node string
+	Node   string
+	Leader bool
 }
 
 func (mp MasterParser) Parse(r io.ReadCloser) ([]store.Point, error) {
@@ -25,25 +26,26 @@ func (mp MasterParser) Parse(r io.ReadCloser) ([]store.Point, error) {
 		log.Println("Error parsing to MasterStats")
 		return []store.Point{}, err
 	}
-	stats.Node = mp.Node
 	stats.Time = time.Now()
 	return mp.getMesosPoints(stats), nil
 }
 
 func (mp MasterParser) getMesosPoints(stats MasterStats) []store.Point {
-	return []store.Point{
-		mp.getCpuPoint(stats),
-		mp.getDiskPoint(stats),
-		mp.getMemPoint(stats),
-		mp.getSystemPoint(stats),
-		//mp.getRegistrarPoint(stats),
-		mp.getTasksPoint(stats),
-		mp.getFrameworksPoint(stats),
-		mp.getSlavesPoint(stats),
-		mp.getEventQueuePoint(stats),
-		mp.getMessagesPoint(stats),
-		mp.getGlobalPoint(stats),
+	ps := []store.Point{mp.getSystemPoint(stats)}
+	if mp.Leader {
+		ps = append(ps,
+			mp.getCpuPoint(stats),
+			mp.getDiskPoint(stats),
+			mp.getMemPoint(stats),
+			//mp.getRegistrarPoint(stats),
+			mp.getTasksPoint(stats),
+			mp.getFrameworksPoint(stats),
+			mp.getSlavesPoint(stats),
+			mp.getEventQueuePoint(stats),
+			mp.getMessagesPoint(stats),
+			mp.getGlobalPoint(stats))
 	}
+	return ps
 }
 
 func (mp MasterParser) getCpuPoint(stats MasterStats) store.Point {
@@ -103,15 +105,15 @@ func (mp MasterParser) getSystemPoint(stats MasterStats) store.Point {
 	return store.Point{
 		Measurement: "system",
 		Tags: map[string]string{
-			"node": stats.Node,
+			"node": mp.Node,
 		},
 		Fields: map[string]interface{}{
 			"cpus_total":      stats.System_cpusTotal,
 			"load_15min":      stats.System_load15min,
 			"load_1min":       stats.System_load1min,
 			"load_5min":       stats.System_load5min,
-			"mem_free_bytes":  stats.System_memFreeBytes,
-			"mem_total_bytes": stats.System_memTotalBytes,
+			"mem_free_bytes":  int(stats.System_memFreeBytes),
+			"mem_total_bytes": int(stats.System_memTotalBytes),
 		},
 		Time: stats.Time,
 	}
@@ -208,88 +210,88 @@ func (mp MasterParser) getGlobalPoint(stats MasterStats) store.Point {
 }
 
 type MasterStats struct {
-	Allocator_eventQueueDispatches int `json:"allocator/event_queue_dispatches"`
+	Allocator_eventQueueDispatches float64 `json:"allocator/event_queue_dispatches"`
 	Master_cpusPercent float64 `json:"master/cpus_percent"`
 	Master_cpusRevocablePercent float64 `json:"master/cpus_revocable_percent"`
 	Master_cpusRevocableTotal float64 `json:"master/cpus_revocable_total"`
 	Master_cpusRevocableUsed float64 `json:"master/cpus_revocable_used"`
-	Master_cpusTotal int `json:"master/cpus_total"`
+	Master_cpusTotal float64 `json:"master/cpus_total"`
 	Master_cpusUsed float64 `json:"master/cpus_used"`
 	Master_diskPercent float64 `json:"master/disk_percent"`
 	Master_diskRevocablePercent float64 `json:"master/disk_revocable_percent"`
-	Master_diskRevocableTotal int `json:"master/disk_revocable_total"`
-	Master_diskRevocableUsed int `json:"master/disk_revocable_used"`
-	Master_diskTotal int `json:"master/disk_total"`
-	Master_diskUsed int `json:"master/disk_used"`
-	Master_droppedMessages int `json:"master/dropped_messages"`
-	Master_elected int `json:"master/elected"`
-	Master_eventQueueDispatches int `json:"master/event_queue_dispatches"`
-	Master_eventQueueHTTPRequests int `json:"master/event_queue_http_requests"`
-	Master_eventQueueMessages int `json:"master/event_queue_messages"`
-	Master_frameworksActive int `json:"master/frameworks_active"`
-	Master_frameworksConnected int `json:"master/frameworks_connected"`
-	Master_frameworksDisconnected int `json:"master/frameworks_disconnected"`
-	Master_frameworksInactive int `json:"master/frameworks_inactive"`
-	Master_invalidFrameworkToExecutorMessages int `json:"master/invalid_framework_to_executor_messages"`
-	Master_invalidStatusUpdateAcknowledgements int `json:"master/invalid_status_update_acknowledgements"`
-	Master_invalidStatusUpdates int `json:"master/invalid_status_updates"`
+	Master_diskRevocableTotal float64 `json:"master/disk_revocable_total"`
+	Master_diskRevocableUsed float64 `json:"master/disk_revocable_used"`
+	Master_diskTotal float64 `json:"master/disk_total"`
+	Master_diskUsed float64 `json:"master/disk_used"`
+	Master_droppedMessages float64 `json:"master/dropped_messages"`
+	Master_elected float64 `json:"master/elected"`
+	Master_eventQueueDispatches float64 `json:"master/event_queue_dispatches"`
+	Master_eventQueueHTTPRequests float64 `json:"master/event_queue_http_requests"`
+	Master_eventQueueMessages float64 `json:"master/event_queue_messages"`
+	Master_frameworksActive float64 `json:"master/frameworks_active"`
+	Master_frameworksConnected float64 `json:"master/frameworks_connected"`
+	Master_frameworksDisconnected float64 `json:"master/frameworks_disconnected"`
+	Master_frameworksInactive float64 `json:"master/frameworks_inactive"`
+	Master_invalidFrameworkToExecutorMessages float64 `json:"master/invalid_framework_to_executor_messages"`
+	Master_invalidStatusUpdateAcknowledgements float64 `json:"master/invalid_status_update_acknowledgements"`
+	Master_invalidStatusUpdates float64 `json:"master/invalid_status_updates"`
 	Master_memPercent float64 `json:"master/mem_percent"`
 	Master_memRevocablePercent float64 `json:"master/mem_revocable_percent"`
-	Master_memRevocableTotal int `json:"master/mem_revocable_total"`
-	Master_memRevocableUsed int `json:"master/mem_revocable_used"`
-	Master_memTotal int `json:"master/mem_total"`
-	Master_memUsed int `json:"master/mem_used"`
-	Master_messagesAuthenticate int `json:"master/messages_authenticate"`
-	Master_messagesDeactivateFramework int `json:"master/messages_deactivate_framework"`
-	Master_messagesDeclineOffers int `json:"master/messages_decline_offers"`
-	Master_messagesExitedExecutor int `json:"master/messages_exited_executor"`
-	Master_messagesFrameworkToExecutor int `json:"master/messages_framework_to_executor"`
-	Master_messagesKillTask int `json:"master/messages_kill_task"`
-	Master_messagesLaunchTasks int `json:"master/messages_launch_tasks"`
-	Master_messagesReconcileTasks int `json:"master/messages_reconcile_tasks"`
-	Master_messagesRegisterFramework int `json:"master/messages_register_framework"`
-	Master_messagesRegisterSlave int `json:"master/messages_register_slave"`
-	Master_messagesReregisterFramework int `json:"master/messages_reregister_framework"`
-	Master_messagesReregisterSlave int `json:"master/messages_reregister_slave"`
-	Master_messagesResourceRequest int `json:"master/messages_resource_request"`
-	Master_messagesReviveOffers int `json:"master/messages_revive_offers"`
-	Master_messagesStatusUpdate int `json:"master/messages_status_update"`
-	Master_messagesStatusUpdateAcknowledgement int `json:"master/messages_status_update_acknowledgement"`
-	Master_messagesUnregisterFramework int `json:"master/messages_unregister_framework"`
-	Master_messagesUnregisterSlave int `json:"master/messages_unregister_slave"`
-	Master_messagesUpdateSlave int `json:"master/messages_update_slave"`
-	Master_outstandingOffers int `json:"master/outstanding_offers"`
-	Master_recoverySlaveRemovals int `json:"master/recovery_slave_removals"`
-	Master_slaveRegistrations int `json:"master/slave_registrations"`
-	Master_slaveRemovals int `json:"master/slave_removals"`
-	Master_slaveRemovals_reasonRegistered int `json:"master/slave_removals/reason_registered"`
-	Master_slaveRemovals_reasonUnhealthy int `json:"master/slave_removals/reason_unhealthy"`
-	Master_slaveRemovals_reasonUnregistered int `json:"master/slave_removals/reason_unregistered"`
-	Master_slaveReregistrations int `json:"master/slave_reregistrations"`
-	Master_slaveShutdownsCanceled int `json:"master/slave_shutdowns_canceled"`
-	Master_slaveShutdownsCompleted int `json:"master/slave_shutdowns_completed"`
-	Master_slaveShutdownsScheduled int `json:"master/slave_shutdowns_scheduled"`
-	Master_slavesActive int `json:"master/slaves_active"`
-	Master_slavesConnected int `json:"master/slaves_connected"`
-	Master_slavesDisconnected int `json:"master/slaves_disconnected"`
-	Master_slavesInactive int `json:"master/slaves_inactive"`
-	Master_tasksError int `json:"master/tasks_error"`
-	Master_tasksFailed int `json:"master/tasks_failed"`
-	Master_tasksFinished int `json:"master/tasks_finished"`
-	Master_tasksKilled int `json:"master/tasks_killed"`
-	Master_tasksLost int `json:"master/tasks_lost"`
-	Master_tasksRunning int `json:"master/tasks_running"`
-	Master_tasksStaging int `json:"master/tasks_staging"`
-	Master_tasksStarting int `json:"master/tasks_starting"`
+	Master_memRevocableTotal float64 `json:"master/mem_revocable_total"`
+	Master_memRevocableUsed float64 `json:"master/mem_revocable_used"`
+	Master_memTotal float64 `json:"master/mem_total"`
+	Master_memUsed float64 `json:"master/mem_used"`
+	Master_messagesAuthenticate float64 `json:"master/messages_authenticate"`
+	Master_messagesDeactivateFramework float64 `json:"master/messages_deactivate_framework"`
+	Master_messagesDeclineOffers float64 `json:"master/messages_decline_offers"`
+	Master_messagesExitedExecutor float64 `json:"master/messages_exited_executor"`
+	Master_messagesFrameworkToExecutor float64 `json:"master/messages_framework_to_executor"`
+	Master_messagesKillTask float64 `json:"master/messages_kill_task"`
+	Master_messagesLaunchTasks float64 `json:"master/messages_launch_tasks"`
+	Master_messagesReconcileTasks float64 `json:"master/messages_reconcile_tasks"`
+	Master_messagesRegisterFramework float64 `json:"master/messages_register_framework"`
+	Master_messagesRegisterSlave float64 `json:"master/messages_register_slave"`
+	Master_messagesReregisterFramework float64 `json:"master/messages_reregister_framework"`
+	Master_messagesReregisterSlave float64 `json:"master/messages_reregister_slave"`
+	Master_messagesResourceRequest float64 `json:"master/messages_resource_request"`
+	Master_messagesReviveOffers float64 `json:"master/messages_revive_offers"`
+	Master_messagesStatusUpdate float64 `json:"master/messages_status_update"`
+	Master_messagesStatusUpdateAcknowledgement float64 `json:"master/messages_status_update_acknowledgement"`
+	Master_messagesUnregisterFramework float64 `json:"master/messages_unregister_framework"`
+	Master_messagesUnregisterSlave float64 `json:"master/messages_unregister_slave"`
+	Master_messagesUpdateSlave float64 `json:"master/messages_update_slave"`
+	Master_outstandingOffers float64 `json:"master/outstanding_offers"`
+	Master_recoverySlaveRemovals float64 `json:"master/recovery_slave_removals"`
+	Master_slaveRegistrations float64 `json:"master/slave_registrations"`
+	Master_slaveRemovals float64 `json:"master/slave_removals"`
+	Master_slaveRemovals_reasonRegistered float64 `json:"master/slave_removals/reason_registered"`
+	Master_slaveRemovals_reasonUnhealthy float64 `json:"master/slave_removals/reason_unhealthy"`
+	Master_slaveRemovals_reasonUnregistered float64 `json:"master/slave_removals/reason_unregistered"`
+	Master_slaveReregistrations float64 `json:"master/slave_reregistrations"`
+	Master_slaveShutdownsCanceled float64 `json:"master/slave_shutdowns_canceled"`
+	Master_slaveShutdownsCompleted float64 `json:"master/slave_shutdowns_completed"`
+	Master_slaveShutdownsScheduled float64 `json:"master/slave_shutdowns_scheduled"`
+	Master_slavesActive float64 `json:"master/slaves_active"`
+	Master_slavesConnected float64 `json:"master/slaves_connected"`
+	Master_slavesDisconnected float64 `json:"master/slaves_disconnected"`
+	Master_slavesInactive float64 `json:"master/slaves_inactive"`
+	Master_tasksError float64 `json:"master/tasks_error"`
+	Master_tasksFailed float64 `json:"master/tasks_failed"`
+	Master_tasksFinished float64 `json:"master/tasks_finished"`
+	Master_tasksKilled float64 `json:"master/tasks_killed"`
+	Master_tasksLost float64 `json:"master/tasks_lost"`
+	Master_tasksRunning float64 `json:"master/tasks_running"`
+	Master_tasksStaging float64 `json:"master/tasks_staging"`
+	Master_tasksStarting float64 `json:"master/tasks_starting"`
 	Master_uptimeSecs float64 `json:"master/uptime_secs"`
-	Master_validFrameworkToExecutorMessages int `json:"master/valid_framework_to_executor_messages"`
-	Master_validStatusUpdateAcknowledgements int `json:"master/valid_status_update_acknowledgements"`
-	Master_validStatusUpdates int `json:"master/valid_status_updates"`
-	Registrar_queuedOperations int `json:"registrar/queued_operations"`
-	Registrar_registrySizeBytes int `json:"registrar/registry_size_bytes"`
+	Master_validFrameworkToExecutorMessages float64 `json:"master/valid_framework_to_executor_messages"`
+	Master_validStatusUpdateAcknowledgements float64 `json:"master/valid_status_update_acknowledgements"`
+	Master_validStatusUpdates float64 `json:"master/valid_status_updates"`
+	Registrar_queuedOperations float64 `json:"registrar/queued_operations"`
+	Registrar_registrySizeBytes float64 `json:"registrar/registry_size_bytes"`
 	Registrar_stateFetchMs float64 `json:"registrar/state_fetch_ms"`
 	Registrar_stateStoreMs float64 `json:"registrar/state_store_ms"`
-	Registrar_stateStoreMs_count int `json:"registrar/state_store_ms/count"`
+	Registrar_stateStoreMs_count float64 `json:"registrar/state_store_ms/count"`
 	Registrar_stateStoreMs_max float64 `json:"registrar/state_store_ms/max"`
 	Registrar_stateStoreMs_min float64 `json:"registrar/state_store_ms/min"`
 	Registrar_stateStoreMs_p50 float64 `json:"registrar/state_store_ms/p50"`
@@ -298,12 +300,11 @@ type MasterStats struct {
 	Registrar_stateStoreMs_p99 float64 `json:"registrar/state_store_ms/p99"`
 	Registrar_stateStoreMs_p999 float64 `json:"registrar/state_store_ms/p999"`
 	Registrar_stateStoreMs_p9999 float64 `json:"registrar/state_store_ms/p9999"`
-	System_cpusTotal int `json:"system/cpus_total"`
+	System_cpusTotal float64 `json:"system/cpus_total"`
 	System_load15min float64 `json:"system/load_15min"`
 	System_load1min float64 `json:"system/load_1min"`
 	System_load5min float64 `json:"system/load_5min"`
-	System_memFreeBytes int `json:"system/mem_free_bytes"`
-	System_memTotalBytes int `json:"system/mem_total_bytes"`
+	System_memFreeBytes float64 `json:"system/mem_free_bytes"`
+	System_memTotalBytes float64 `json:"system/mem_total_bytes"`
 	Time time.Time
-	Node string
 }
diff --git a/parser/mesos/slave.go b/parser/mesos/slave.go
index 6c6586c..f87d084 100644
--- a/parser/mesos/slave.go
+++ b/parser/mesos/slave.go
@@ -117,8 +117,8 @@ func (mp SlaveParser) getSystemPoint(stats SlaveStats) store.Point {
 			"load_15min": stats.System_load15min,
 			"load_1min":  stats.System_load1min,
 			"load_5min":  stats.System_load5min,
-			"mem_free_bytes":  stats.System_memFreeBytes,
-			"mem_total_bytes": stats.System_memTotalBytes,
+			"mem_free_bytes":  int(stats.System_memFreeBytes),
+			"mem_total_bytes": int(stats.System_memTotalBytes),
 		},
 		Time: stats.Time,
 	}
@@ -162,52 +162,52 @@ func (mp SlaveParser) getGlobalPoint(stats SlaveStats) store.Point {
 }
 
 type SlaveStats struct {
-	Containerizer_mesos_containerDestroyErrors int `json:"containerizer/mesos/container_destroy_errors"`
-	Slave_containerLaunchErrors int `json:"slave/container_launch_errors"`
+	Containerizer_mesos_containerDestroyErrors float64 `json:"containerizer/mesos/container_destroy_errors"`
+	Slave_containerLaunchErrors float64 `json:"slave/container_launch_errors"`
 	Slave_cpusPercent float64 `json:"slave/cpus_percent"`
 	Slave_cpusRevocablePercent float64 `json:"slave/cpus_revocable_percent"`
-	Slave_cpusRevocableTotal int `json:"slave/cpus_revocable_total"`
-	Slave_cpusRevocableUsed int `json:"slave/cpus_revocable_used"`
-	Slave_cpusTotal int `json:"slave/cpus_total"`
+	Slave_cpusRevocableTotal float64 `json:"slave/cpus_revocable_total"`
+	Slave_cpusRevocableUsed float64 `json:"slave/cpus_revocable_used"`
+	Slave_cpusTotal float64 `json:"slave/cpus_total"`
 	Slave_cpusUsed float64 `json:"slave/cpus_used"`
 	Slave_diskPercent float64 `json:"slave/disk_percent"`
 	Slave_diskRevocablePercent float64 `json:"slave/disk_revocable_percent"`
-	Slave_diskRevocableTotal int `json:"slave/disk_revocable_total"`
-	Slave_diskRevocableUsed int `json:"slave/disk_revocable_used"`
-	Slave_diskTotal int `json:"slave/disk_total"`
-	Slave_diskUsed int `json:"slave/disk_used"`
+	Slave_diskRevocableTotal float64 `json:"slave/disk_revocable_total"`
+	Slave_diskRevocableUsed float64 `json:"slave/disk_revocable_used"`
+	Slave_diskTotal float64 `json:"slave/disk_total"`
+	Slave_diskUsed float64 `json:"slave/disk_used"`
 	Slave_executorDirectoryMaxAllowedAgeSecs float64 `json:"slave/executor_directory_max_allowed_age_secs"`
-	Slave_executorsRegistering int `json:"slave/executors_registering"`
-	Slave_executorsRunning int `json:"slave/executors_running"`
-	Slave_executorsTerminated int `json:"slave/executors_terminated"`
-	Slave_executorsTerminating int `json:"slave/executors_terminating"`
-	Slave_frameworksActive int `json:"slave/frameworks_active"`
-	Slave_invalidFrameworkMessages int `json:"slave/invalid_framework_messages"`
-	Slave_invalidStatusUpdates int `json:"slave/invalid_status_updates"`
+	Slave_executorsRegistering float64 `json:"slave/executors_registering"`
+	Slave_executorsRunning float64 `json:"slave/executors_running"`
+	Slave_executorsTerminated float64 `json:"slave/executors_terminated"`
+	Slave_executorsTerminating float64 `json:"slave/executors_terminating"`
+	Slave_frameworksActive float64 `json:"slave/frameworks_active"`
+	Slave_invalidFrameworkMessages float64 `json:"slave/invalid_framework_messages"`
+	Slave_invalidStatusUpdates float64 `json:"slave/invalid_status_updates"`
 	Slave_memPercent float64 `json:"slave/mem_percent"`
 	Slave_memRevocablePercent float64 `json:"slave/mem_revocable_percent"`
-	Slave_memRevocableTotal int `json:"slave/mem_revocable_total"`
-	Slave_memRevocableUsed int `json:"slave/mem_revocable_used"`
-	Slave_memTotal int `json:"slave/mem_total"`
-	Slave_memUsed int `json:"slave/mem_used"`
-	Slave_recoveryErrors int `json:"slave/recovery_errors"`
-	Slave_registered int `json:"slave/registered"`
-	Slave_tasksFailed int `json:"slave/tasks_failed"`
-	Slave_tasksFinished int `json:"slave/tasks_finished"`
-	Slave_tasksKilled int `json:"slave/tasks_killed"`
-	Slave_tasksLost int `json:"slave/tasks_lost"`
-	Slave_tasksRunning int `json:"slave/tasks_running"`
-	Slave_tasksStaging int `json:"slave/tasks_staging"`
-	Slave_tasksStarting int `json:"slave/tasks_starting"`
+	Slave_memRevocableTotal float64 `json:"slave/mem_revocable_total"`
+	Slave_memRevocableUsed float64 `json:"slave/mem_revocable_used"`
+	Slave_memTotal float64 `json:"slave/mem_total"`
+	Slave_memUsed float64 `json:"slave/mem_used"`
+	Slave_recoveryErrors float64 `json:"slave/recovery_errors"`
+	Slave_registered float64 `json:"slave/registered"`
+	Slave_tasksFailed float64 `json:"slave/tasks_failed"`
+	Slave_tasksFinished float64 `json:"slave/tasks_finished"`
+	Slave_tasksKilled float64 `json:"slave/tasks_killed"`
+	Slave_tasksLost float64 `json:"slave/tasks_lost"`
+	Slave_tasksRunning float64 `json:"slave/tasks_running"`
+	Slave_tasksStaging float64 `json:"slave/tasks_staging"`
+	Slave_tasksStarting float64 `json:"slave/tasks_starting"`
 	Slave_uptimeSecs float64 `json:"slave/uptime_secs"`
-	Slave_validFrameworkMessages int `json:"slave/valid_framework_messages"`
-	Slave_validStatusUpdates int `json:"slave/valid_status_updates"`
-	System_cpusTotal int `json:"system/cpus_total"`
+	Slave_validFrameworkMessages float64 `json:"slave/valid_framework_messages"`
+	Slave_validStatusUpdates float64 `json:"slave/valid_status_updates"`
+	System_cpusTotal float64 `json:"system/cpus_total"`
 	System_load15min float64 `json:"system/load_15min"`
 	System_load1min float64 `json:"system/load_1min"`
 	System_load5min float64 `json:"system/load_5min"`
-	System_memFreeBytes int `json:"system/mem_free_bytes"`
-	System_memTotalBytes int `json:"system/mem_total_bytes"`
+	System_memFreeBytes float64 `json:"system/mem_free_bytes"`
+	System_memTotalBytes float64 `json:"system/mem_total_bytes"`
 	Time time.Time
 	Node string
 }
diff --git a/reader/reader.go b/reader/reader.go
new file mode 100644
index 0000000..4413bf5
--- /dev/null
+++ b/reader/reader.go
@@ -0,0 +1,23 @@
+package reader
+
+import (
+	"io/ioutil"
+	"log"
+	"net/http"
+)
+
+func ReadUrl(url string) ([]byte, error) {
+	resp, err := http.Get(url)
+	if err != nil {
+		log.Println("Error connecting to", url)
+		return []byte{}, err
+	}
+	defer resp.Body.Close()
+
+	body, err := ioutil.ReadAll(resp.Body)
+	if err != nil {
+		log.Println("Error reading response from", url)
+		return body, err
+	}
+	return body, nil
+}
diff --git a/store/influxdb.go b/store/influxdb.go
index 9a45baf..d86b772 100644
--- a/store/influxdb.go
+++ b/store/influxdb.go
@@ -7,6 +7,7 @@ import (
 	"time"
 
 	"github.com/influxdb/influxdb/client"
+	"github.com/kpacha/mesos-influxdb-collector/config"
 )
 
 type Store interface {
@@ -47,6 +48,17 @@ type Influxdb struct {
 	Config InfluxdbConfig
 }
 
+func NewInfluxdbFromConfig(conf *config.Config, user, password string) Store {
+	return NewInfluxdb(InfluxdbConfig{
+		Host:       conf.InfluxDB.Host,
+		Port:       conf.InfluxDB.Port,
+		DB:         conf.InfluxDB.DB,
+		Username:   user,
+		Password:   password,
+		CheckLapse: conf.InfluxDB.CheckLapse,
+	})
+}
+
 func NewInfluxdb(conf InfluxdbConfig) Store {
 	u, err := url.Parse(fmt.Sprintf("http://%s:%d", conf.Host, conf.Port))
 	if err != nil {
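Note: to close the loop, this is how the pieces in this diff fit together at startup, condensed from `main.go` into a hedged, self-contained sketch (credentials hard-coded for brevity; the real binary reads them from the environment):

```go
package main

import (
	"log"

	"github.com/kpacha/mesos-influxdb-collector/collector"
	"github.com/kpacha/mesos-influxdb-collector/config"
	"github.com/kpacha/mesos-influxdb-collector/store"
)

func main() {
	// Parse conf.hcl; AllowDNS lets the parser expand the mesosDNS block
	// into concrete Master/Slave/Marathon entries.
	cp := config.ConfigParser{Path: "conf.hcl", AllowDNS: true}
	conf, err := cp.Parse()
	if err != nil {
		log.Fatal("Error parsing config file:", err)
	}

	// One UrlCollector per configured node, fanned in by the MultiCollector.
	col := collector.NewCollectorFromConfig(conf)

	// The store that the subscription in main.go flushes the points into.
	influxdb := store.NewInfluxdbFromConfig(conf, "root", "root")

	points, err := col.Collect() // a single collection cycle
	if err != nil {
		log.Fatal("Error collecting:", err)
	}
	log.Printf("collected %d points, store ready: %T", len(points), influxdb)
}
```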