diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100755 index 0000000..6bd8f51 --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,6 @@ +Docker Telegraf InfluxDB Grafana image +----------------------------------- + +## v1.0.0 (2019-01-09) + +* Initial release diff --git a/Dockerfile b/Dockerfile new file mode 100755 index 0000000..e58fc33 --- /dev/null +++ b/Dockerfile @@ -0,0 +1,112 @@ +FROM ubuntu:18.04 +MAINTAINER Arthur Kono + +ENV DEBIAN_FRONTEND noninteractive +ENV LANG C.UTF-8 + +# Default versions +ENV TELEGRAF_VERSION 1.9.2-1 +ENV INFLUXDB_VERSION 1.7.2 +ENV GRAFANA_VERSION 5.3.2 +ENV CHRONOGRAF_VERSION 1.7.5 + +# Database Defaults +ENV INFLUXDB_GRAFANA_DB datasource +ENV INFLUXDB_GRAFANA_USER datasource +ENV INFLUXDB_GRAFANA_PW datasource + +ENV GF_DATABASE_TYPE=sqlite3 + +#ENV MYSQL_GRAFANA_USER grafana +#ENV MYSQL_GRAFANA_PW grafana + +# Fix bad proxy issue +COPY system/99fixbadproxy /etc/apt/apt.conf.d/99fixbadproxy + +# Clear previous sources +RUN rm /var/lib/apt/lists/* -vf + +# Base dependencies + +# apt-get -y --force-yes install \ + +RUN apt-get -y update && \ + apt-get -y dist-upgrade && \ + apt-get -y install \ + apt-utils \ + ca-certificates \ + curl \ + git \ + htop \ + libfontconfig \ + nano \ + net-tools \ + openssh-server \ + supervisor \ + gnupg \ + gnupg2 \ + gnupg1 \ + snmp \ + snmp-mibs-downloader \ + wget && \ + curl -sL https://deb.nodesource.com/setup_8.x | bash - && \ + apt-get install -y nodejs + +# Configure Supervisord, SSH and base env +COPY supervisord/supervisord.conf /etc/supervisor/conf.d/supervisord.conf + +WORKDIR /root + +RUN mkdir -p /var/log/supervisor && \ + mkdir -p /var/run/sshd && \ + sed -i 's/PermitRootLogin prohibit-password/PermitRootLogin yes/' /etc/ssh/sshd_config && \ + echo 'root:root' | chpasswd && \ + rm -rf .ssh && \ + rm -rf .profile && \ + mkdir .ssh + +COPY ssh/id_rsa .ssh/id_rsa +COPY bash/profile .profile + +# Configure MySql +#COPY scripts/setup_mysql.sh /tmp/setup_mysql.sh + +#RUN /tmp/setup_mysql.sh + +# Install InfluxDB +RUN wget https://dl.influxdata.com/influxdb/releases/influxdb_${INFLUXDB_VERSION}_amd64.deb && \ + dpkg -i influxdb_${INFLUXDB_VERSION}_amd64.deb && rm influxdb_${INFLUXDB_VERSION}_amd64.deb + +# Configure InfluxDB +COPY influxdb/influxdb.conf /etc/influxdb/influxdb.conf +COPY influxdb/init.sh /etc/init.d/influxdb + +# Install Telegraf +RUN wget https://dl.influxdata.com/telegraf/releases/telegraf_${TELEGRAF_VERSION}_amd64.deb && \ + dpkg -i telegraf_${TELEGRAF_VERSION}_amd64.deb && rm telegraf_${TELEGRAF_VERSION}_amd64.deb + +# Configure Telegraf +COPY telegraf/telegraf.conf /etc/telegraf/telegraf.conf +COPY telegraf/init.sh /etc/init.d/telegraf + +# Install chronograf +RUN wget https://dl.influxdata.com/chronograf/releases/chronograf_${CHRONOGRAF_VERSION}_amd64.deb && \ + dpkg -i chronograf_${CHRONOGRAF_VERSION}_amd64.deb + +# Install Grafana +RUN wget https://s3-us-west-2.amazonaws.com/grafana-releases/release/grafana_${GRAFANA_VERSION}_amd64.deb && \ + dpkg -i grafana_${GRAFANA_VERSION}_amd64.deb && rm grafana_${GRAFANA_VERSION}_amd64.deb + +# Configure Grafana with provisioning +ADD grafana/provisioning /etc/grafana/provisioning +ADD grafana/dashboards /var/lib/grafana/dashboards +COPY grafana/grafana.ini /etc/grafana/grafana.ini + +VOLUME /var/lib/influxdb +VOLUME /var/lib/grafana + +# Cleanup +RUN apt-get clean && \ + rm -rf /var/lib/apt/lists/* /tmp/* /var/tmp/* + +CMD ["/usr/bin/supervisord"] diff --git a/LICENSE b/LICENSE new file mode 100755 index 0000000..504d163 --- /dev/null +++ b/LICENSE @@ 
-0,0 +1,21 @@
+The MIT License (MIT)
+
+Copyright (c) 2019 Arthur Kono
+
+Permission is hereby granted, free of charge, to any person obtaining a copy
+of this software and associated documentation files (the "Software"), to deal
+in the Software without restriction, including without limitation the rights
+to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
+copies of the Software, and to permit persons to whom the Software is
+furnished to do so, subject to the following conditions:
+
+The above copyright notice and this permission notice shall be included in all
+copies or substantial portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
+AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
+LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
+OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
+SOFTWARE.
diff --git a/README.md b/README.md
new file mode 100755
index 0000000..fea5fe6
--- /dev/null
+++ b/README.md
@@ -0,0 +1,111 @@
+# Docker Image with Telegraf, InfluxDB and Grafana
+This is a Docker image based on the [Docker Image with Telegraf (StatsD), InfluxDB and Grafana](https://github.com/samuelebistoletti/docker-statsd-influxdb-grafana) from [Samuele Bistoletti](https://github.com/samuelebistoletti).
+
+The main points of difference with this image are:
+
+* Persistence is supported by mounting volumes into the Docker container
+* Grafana stores its data in SQLite files instead of a MySQL database, so MySQL is not installed on the container
+* SNMP support has been added
+
+## Versions
+
+* Docker Image: 1.0.0
+* Ubuntu: 18.04
+* InfluxDB: 1.7.2
+* Telegraf (StatsD): 1.9.2-1
+* Grafana: 5.3.2
+
+## Quick Start
+
+To start the container for the first time, run:
+
+```sh
+docker run --ulimit nofile=66000:66000 \
+  -d \
+  --name docker-telegraf-influxdb-grafana \
+  -p 3003:3003 \
+  -p 3004:8888 \
+  -p 8086:8086 \
+  -p 22022:22 \
+  -p 8125:8125/udp \
+  -v /path/for/influxdb:/var/lib/influxdb \
+  -v /path/for/grafana:/var/lib/grafana \
+  artlov/docker-telegraf-influxdb-grafana:latest
+```
+
+You can replace `latest` with the desired version listed in the changelog file.
+
+To stop the container, run:
+
+```sh
+docker stop docker-telegraf-influxdb-grafana
+```
+
+To start the container again, run:
+
+```sh
+docker start docker-telegraf-influxdb-grafana
+```
+
+## Mapped Ports
+
+```
+Host   Container  Service
+
+3003   3003       grafana
+3004   8888       influxdb-admin (chronograf)
+8086   8086       influxdb
+8125   8125 (UDP) telegraf (statsd)
+22022  22         sshd
+```
+
+## SSH
+
+```sh
+ssh root@localhost -p 22022
+```
+
+Password: root
+
+## Grafana
+
+Open http://localhost:3003
+
+```
+Username: root
+Password: root
+```
+
+### Add data source on Grafana
+
+1. Using the wizard, click on `Add data source`
+2. Choose a `name` for the source and flag it as `Default`
+3. Choose `InfluxDB` as `type`
+4. Choose `direct` as `access`
+5. Fill in the remaining fields as follows and click on `Add`, leaving the other fields unchanged
+
+```
+Url: http://localhost:8086
+Database: telegraf
+User: telegraf
+Password: telegraf
+```
+
+Leave `Basic Auth` and `With Credentials` unflagged; a proxy is not required.
+
+Now you are ready to add your first dashboard and run some queries against the database.
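+
+## Smoke Test
+
+A quick way to verify that the whole pipeline is wired together is to push a test metric into Telegraf's StatsD listener and read it back over the InfluxDB HTTP API. This is a minimal sketch assuming the container is running with the port mappings from the quick start above; the counter name `smoke_test` is arbitrary, and `nc` flags can differ slightly between netcat implementations.
+
+```sh
+# Send a StatsD counter to Telegraf's UDP listener on port 8125
+echo "smoke_test:1|c" | nc -u -w1 localhost 8125
+
+# Telegraf flushes to InfluxDB every 10 seconds, so wait one flush interval
+sleep 11
+
+# Read the metric back from the InfluxDB HTTP API on port 8086
+# (no credentials needed: HTTP auth is disabled in the bundled influxdb.conf)
+curl -G 'http://localhost:8086/query' \
+  --data-urlencode 'db=telegraf' \
+  --data-urlencode 'q=SELECT * FROM "smoke_test"'
+```
+
+If the query returns a series, collection is working end to end, and the same measurement should be visible from Grafana once the data source above is added.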
+
+## InfluxDB
+
+### Web Interface
+
+Open http://localhost:3004
+
+```
+Username: root
+Password: root
+Port: 8086
+```
+
+### InfluxDB Shell (CLI)
+
+1. Establish an SSH connection with the container
+2. Launch `influx` to open the InfluxDB Shell (CLI)
diff --git a/bash/profile b/bash/profile
new file mode 100755
index 0000000..989fe39
--- /dev/null
+++ b/bash/profile
@@ -0,0 +1,11 @@
+# ~/.profile: executed by Bourne-compatible login shells.
+
+if [ "$BASH" ]; then
+  if [ -f ~/.bashrc ]; then
+    . ~/.bashrc
+  fi
+fi
+
+mesg n
+
+export HOME=/root
diff --git a/docker-compose.yml b/docker-compose.yml
new file mode 100755
index 0000000..2de99d4
--- /dev/null
+++ b/docker-compose.yml
@@ -0,0 +1,13 @@
+version: '3'
+
+services:
+  webapp:
+    build: .
+    image: artlov/docker-telegraf-influxdb-grafana
+
+#test:
+# override:
+# - docker run -d -p 3003:3003 -p 3004:8888 -p 8086:8086 -p 22022:22 -p 8125:8125/udp artlov/docker-statsd-influxdb-grafana; sleep 15
+# - curl --retry 10 --retry-delay 5 -v http://localhost:3003
+# - curl --retry 10 --retry-delay 5 -v http://localhost:3004
+# - curl --retry 10 --retry-delay 5 -v http://localhost:8086
diff --git a/grafana/dashboards/exampleDashboard.json b/grafana/dashboards/exampleDashboard.json
new file mode 100755
index 0000000..ba11952
--- /dev/null
+++ b/grafana/dashboards/exampleDashboard.json
@@ -0,0 +1,142 @@
+{
+  "annotations": {
+    "list": [
+      {
+        "builtIn": 1,
+        "datasource": "InfluxDB",
+        "enable": true,
+        "hide": true,
+        "iconColor": "rgba(0, 211, 255, 1)",
+        "name": "Annotations & Alerts",
+        "type": "dashboard"
+      }
+    ]
+  },
+  "editable": false,
+  "gnetId": null,
+  "graphTooltip": 0,
+  "hideControls": false,
+  "id": null,
+  "links": [],
+  "panels": [
+    {
+      "aliasColors": {},
+      "bars": false,
+      "dashLength": 10,
+      "dashes": false,
+      "datasource": null,
+      "fill": 1,
+      "gridPos": {
+        "h": 8,
+        "w": 24,
+        "x": 0,
+        "y": 0
+      },
+      "id": 2,
+      "legend": {
+        "avg": false,
+        "current": false,
+        "max": false,
+        "min": false,
+        "show": true,
+        "total": false,
+        "values": false
+      },
+      "lines": true,
+      "linewidth": 1,
+      "links": [],
+      "nullPointMode": "null",
+      "percentage": false,
+      "pointradius": 5,
+      "points": false,
+      "renderer": "flot",
+      "seriesOverrides": [],
+      "spaceLength": 10,
+      "stack": false,
+      "steppedLine": false,
+      "targets": [
+        {
+          "expr": "node_cpu{mode=\"user\"}",
+          "format": "time_series",
+          "intervalFactor": 1,
+          "legendFormat": "{{ cpu }}",
+          "refId": "A"
+        }
+      ],
+      "thresholds": [],
+      "timeFrom": null,
+      "timeShift": null,
+      "title": "cpu",
+      "tooltip": {
+        "shared": true,
+        "sort": 0,
+        "value_type": "individual"
+      },
+      "type": "graph",
+      "xaxis": {
+        "buckets": null,
+        "mode": "time",
+        "name": null,
+        "show": true,
+        "values": []
+      },
+      "yaxes": [
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        },
+        {
+          "format": "short",
+          "label": null,
+          "logBase": 1,
+          "max": null,
+          "min": null,
+          "show": true
+        }
+      ]
+    }
+  ],
+  "schemaVersion": 16,
+  "style": "dark",
+  "tags": [],
+  "templating": {
+    "list": []
+  },
+  "time": {
+    "from": "now-15m",
+    "to": "now"
+  },
+  "timepicker": {
+    "refresh_intervals": [
+      "5s",
+      "10s",
+      "30s",
+      "1m",
+      "5m",
+      "15m",
+      "30m",
+      "1h",
+      "2h",
+      "1d"
+    ],
+    "time_options": [
+      "5m",
+      "15m",
+      "1h",
+      "6h",
+      "12h",
+      "24h",
+      "2d",
+      "7d",
+      "30d"
+    ]
+  },
+  "timezone": "",
+  "title": "Provisioned Dashboard",
+  "uid": "dw3aBiqk7",
+  "version": 1
+}
diff --git a/grafana/grafana.ini b/grafana/grafana.ini
new file mode 100755
index 0000000..2b7f10b
--- /dev/null
+++ b/grafana/grafana.ini
@@ -0,0 +1,387 @@ +##################### Grafana Configuration Example ##################### +# +# Everything has defaults so you only need to uncomment things you want to +# change + +# possible values : production, development +; app_mode = production + +# instance name, defaults to HOSTNAME environment variable value or hostname if HOSTNAME var is empty +; instance_name = ${HOSTNAME} + +#################################### Paths #################################### +[paths] +# Path to where grafana can store temp files, sessions, and the sqlite3 db (if that is used) +# +;data = /var/lib/grafana +# +# Directory where grafana can store logs +# +;logs = /var/log/grafana +# +# Directory where grafana will automatically scan and look for plugins +# +;plugins = /var/lib/grafana/plugins + +# folder that contains provisioning config files +# that grafana will apply on startup and while running. +provisioning = /etc/grafana/provisioning +# +#################################### Server #################################### +[server] +# Protocol (http or https) +protocol = http + +# The ip address to bind to, empty will bind to all interfaces +;http_addr = + +# The http port to use +http_port = 3003 + +# The public facing domain name used to access grafana from a browser +;domain = localhost + +# Redirect to correct domain if host header does not match domain +# Prevents DNS rebinding attacks +;enforce_domain = false + +# The full public facing url you use in browser, used for redirects and emails +# If you use reverse proxy and sub path specify full url (with sub path) +root_url = http://localhost:3003 + +# Log web requests +;router_logging = false + +# the path relative working path +;static_root_path = public + +# enable gzip +;enable_gzip = false + +# https certs & key file +;cert_file = +;cert_key = + +#################################### Database #################################### +[database] +# You can configure the database connection by specifying type, host, name, user and password +# as seperate properties or as on string using the url propertie. + +# Either "mysql", "postgres" or "sqlite3", it's your choice +type = sqlite3 +host = 127.0.0.1:3306 +name = grafana +user = grafana +# If the password contains # or ; you have to wrap it with trippel quotes. Ex """#password;""" +password = grafana + +# Use either URL or the previous fields to configure the database +# Example: mysql://user:secret@host:port/database +;url = + +# For "postgres" only, either "disable", "require" or "verify-full" +;ssl_mode = disable + +# For "sqlite3" only, path relative to data_path setting +;path = grafana.db + +#################################### Session #################################### +[session] +# Either "memory", "file", "redis", "mysql", "postgres", default is "file" +;provider = file + +# Provider config options +# memory: not have any config yet +# file: session dir path, is relative to grafana data_path +# redis: config like redis server e.g. `addr=127.0.0.1:6379,pool_size=100,db=grafana` +# mysql: go-sql-driver/mysql dsn config string, e.g. 
`user:password@tcp(127.0.0.1:3306)/database_name` +# postgres: user=a password=b host=localhost port=5432 dbname=c sslmode=disable +;provider_config = sessions + +# Session cookie name +;cookie_name = grafana_sess + +# If you use session in https only, default is false +;cookie_secure = false + +# Session life time, default is 86400 +;session_life_time = 86400 + +#################################### Analytics #################################### +[analytics] +# Server reporting, sends usage counters to stats.grafana.org every 24 hours. +# No ip addresses are being tracked, only simple counters to track +# running instances, dashboard and error counts. It is very helpful to us. +# Change this option to false to disable reporting. +;reporting_enabled = true + +# Set to false to disable all checks to https://grafana.net +# for new vesions (grafana itself and plugins), check is used +# in some UI views to notify that grafana or plugin update exists +# This option does not cause any auto updates, nor send any information +# only a GET request to http://grafana.net to get latest versions +;check_for_updates = true + +# Google Analytics universal tracking code, only enabled if you specify an id here +;google_analytics_ua_id = + +#################################### Security #################################### +[security] +# default admin user, created on startup +admin_user = root + +# default admin password, can be changed before first start of grafana, or in profile settings +admin_password = root + +# used for signing +;secret_key = SW2YcwTIb9zpOOhoPsMm + +# Auto-login remember days +;login_remember_days = 7 +;cookie_username = grafana_user +;cookie_remember_name = grafana_remember + +# disable gravatar profile images +;disable_gravatar = false + +# data source proxy whitelist (ip_or_domain:port separated by spaces) +;data_source_proxy_whitelist = + +[snapshots] +# snapshot sharing options +;external_enabled = true +;external_snapshot_url = https://snapshots-origin.raintank.io +;external_snapshot_name = Publish to snapshot.raintank.io + +# remove expired snapshot +;snapshot_remove_expired = true + +# remove snapshots after 90 days +;snapshot_TTL_days = 90 + +#################################### Users #################################### +[users] +# disable user signup / registration +;allow_sign_up = true + +# Allow non admin users to create organizations +;allow_org_create = true + +# Set to true to automatically assign new users to the default organization (id 1) +;auto_assign_org = true + +# Default role new users will be automatically assigned (if disabled above is set to true) +;auto_assign_org_role = Viewer + +# Background text for the user field on the login page +;login_hint = email or username + +# Default UI theme ("dark" or "light") +;default_theme = dark + +[auth] +# Set to true to disable (hide) the login form, useful if you use OAuth, defaults to false +;disable_login_form = false + +#################################### Anonymous Auth ########################## +[auth.anonymous] +# enable anonymous access +;enabled = false + +# specify organization name that should be used for unauthenticated users +;org_name = Main Org. 
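+
+# Example (a sketch only, disabled here): uncommenting and setting
+#   enabled = true
+#   org_name = Main Org.
+# gives unauthenticated visitors access to that organization's dashboards;
+# combined with the Viewer default for org_role below, that access is read-only.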
+ +# specify role for unauthenticated users +;org_role = Viewer + +#################################### Github Auth ########################## +[auth.github] +;enabled = false +;allow_sign_up = true +;client_id = some_id +;client_secret = some_secret +;scopes = user:email,read:org +;auth_url = https://github.com/login/oauth/authorize +;token_url = https://github.com/login/oauth/access_token +;api_url = https://api.github.com/user +;team_ids = +;allowed_organizations = + +#################################### Google Auth ########################## +[auth.google] +;enabled = false +;allow_sign_up = true +;client_id = some_client_id +;client_secret = some_client_secret +;scopes = https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email +;auth_url = https://accounts.google.com/o/oauth2/auth +;token_url = https://accounts.google.com/o/oauth2/token +;api_url = https://www.googleapis.com/oauth2/v1/userinfo +;allowed_domains = + +#################################### Generic OAuth ########################## +[auth.generic_oauth] +;enabled = false +;name = OAuth +;allow_sign_up = true +;client_id = some_id +;client_secret = some_secret +;scopes = user:email,read:org +;auth_url = https://foo.bar/login/oauth/authorize +;token_url = https://foo.bar/login/oauth/access_token +;api_url = https://foo.bar/user +;team_ids = +;allowed_organizations = + +#################################### Grafana.net Auth #################### +[auth.grafananet] +;enabled = false +;allow_sign_up = true +;client_id = some_id +;client_secret = some_secret +;scopes = user:email +;allowed_organizations = + +#################################### Auth Proxy ########################## +[auth.proxy] +;enabled = false +;header_name = X-WEBAUTH-USER +;header_property = username +;auto_sign_up = true +;ldap_sync_ttl = 60 +;whitelist = 192.168.1.1, 192.168.2.1 + +#################################### Basic Auth ########################## +[auth.basic] +;enabled = true + +#################################### Auth LDAP ########################## +[auth.ldap] +;enabled = false +;config_file = /etc/grafana/ldap.toml +;allow_sign_up = true + +#################################### SMTP / Emailing ########################## +[smtp] +;enabled = false +;host = localhost:25 +;user = +;password = +;cert_file = +;key_file = +;skip_verify = false +;from_address = admin@grafana.localhost + +[emails] +;welcome_email_on_sign_up = false + +#################################### Logging ########################## +[log] +# Either "console", "file", "syslog". Default is console and file +# Use space to separate multiple modes, e.g. "console file" +;mode = console file + +# Either "trace", "debug", "info", "warn", "error", "critical", default is "info" +;level = info + +# optional settings to set different levels for specific loggers. 
Ex filters = sqlstore:debug +;filters = + + +# For "console" mode only +[log.console] +;level = + +# log line format, valid options are text, console and json +;format = console + +# For "file" mode only +[log.file] +;level = + +# log line format, valid options are text, console and json +;format = text + +# This enables automated log rotate(switch of following options), default is true +;log_rotate = true + +# Max line number of single file, default is 1000000 +;max_lines = 1000000 + +# Max size shift of single file, default is 28 means 1 << 28, 256MB +;max_size_shift = 28 + +# Segment log daily, default is true +;daily_rotate = true + +# Expired days of log file(delete after max days), default is 7 +;max_days = 7 + +[log.syslog] +;level = + +# log line format, valid options are text, console and json +;format = text + +# Syslog network type and address. This can be udp, tcp, or unix. If left blank, the default unix endpoints will be used. +;network = +;address = + +# Syslog facility. user, daemon and local0 through local7 are valid. +;facility = + +# Syslog tag. By default, the process' argv[0] is used. +;tag = + + +#################################### AMQP Event Publisher ########################## +[event_publisher] +;enabled = false +;rabbitmq_url = amqp://localhost/ +;exchange = grafana_events + +;#################################### Dashboard JSON files ########################## +[dashboards.json] +;enabled = false +;path = /var/lib/grafana/dashboards + +#################################### Alerting ###################################### +[alerting] +# Makes it possible to turn off alert rule execution. +;execute_alerts = true + +#################################### Internal Grafana Metrics ########################## +# Metrics available at HTTP API Url /api/metrics +[metrics] +# Disable / Enable internal metrics +;enabled = true + +# Publish interval +;interval_seconds = 10 + +# Send internal metrics to Graphite +[metrics.graphite] +# Enable by setting the address setting (ex localhost:2003) +;address = +;prefix = prod.grafana.%(instance_name)s. + +#################################### Internal Grafana Metrics ########################## +# Url used to to import dashboards directly from Grafana.net +[grafana_net] +;url = https://grafana.net + +#################################### External image storage ########################## +[external_image_storage] +# Used for uploading images to public servers so they can be included in slack/email messages. 
+
+# you can choose between (s3, webdav)
+;provider =
+
+[external_image_storage.s3]
+;bucket_url =
+;access_key =
+;secret_key =
+
+[external_image_storage.webdav]
+;url =
+;username =
+;password =
diff --git a/grafana/provisioning/dashboards/dashboardReference.yml b/grafana/provisioning/dashboards/dashboardReference.yml
new file mode 100755
index 0000000..256c0af
--- /dev/null
+++ b/grafana/provisioning/dashboards/dashboardReference.yml
@@ -0,0 +1,6 @@
+- name: 'default'
+  org_id: 1
+  folder: ''
+  type: 'file'
+  options:
+    folder: '/var/lib/grafana/dashboards'
diff --git a/grafana/provisioning/datasources/influxDB.yml b/grafana/provisioning/datasources/influxDB.yml
new file mode 100755
index 0000000..67b5b7e
--- /dev/null
+++ b/grafana/provisioning/datasources/influxDB.yml
@@ -0,0 +1,9 @@
+datasources:
+  - name: InfluxDB
+    type: influxdb
+    access: proxy
+    database: telegraf
+    user: telegraf
+    password: telegraf
+    url: http://localhost:8086
+
diff --git a/influxdb/influxdb.conf b/influxdb/influxdb.conf
new file mode 100755
index 0000000..9ee5adb
--- /dev/null
+++ b/influxdb/influxdb.conf
@@ -0,0 +1,439 @@
+### Welcome to the InfluxDB configuration file.
+
+# The values in this file override the default values used by the system if
+# a config option is not specified. The commented out lines are the configuration
+# field and the default value used. Uncommenting a line and changing the value
+# will change the value used at runtime when the process is restarted.
+
+# Once every 24 hours InfluxDB will report usage data to usage.influxdata.com
+# The data includes a random ID, os, arch, version, the number of series and other
+# usage data. No data from user databases is ever transmitted.
+# Change this option to true to disable reporting.
+# reporting-disabled = false
+
+# we'll try to get the hostname automatically, but if the os returns something
+# that isn't resolvable by other servers in the cluster, use this option to
+# manually set the hostname
+# hostname = "localhost"
+
+###
+### [meta]
+###
+### Controls the parameters for the Raft consensus group that stores metadata
+### about the InfluxDB cluster.
+###
+
+[meta]
+  # Where the metadata/raft database is stored
+  dir = "/var/lib/influxdb/meta"
+
+  # Automatically create a default retention policy when creating a database.
+  # retention-autocreate = true
+
+  # If log messages are printed for the meta service
+  # logging-enabled = true
+
+###
+### [data]
+###
+### Controls where the actual shard data for InfluxDB lives and how it is
+### flushed from the WAL. "dir" may need to be changed to a suitable place
+### for your system, but the WAL settings are an advanced configuration. The
+### defaults should work for most systems.
+###
+
+[data]
+  # The directory where the TSM storage engine stores TSM files.
+  dir = "/var/lib/influxdb/data"
+
+  # The directory where the TSM storage engine stores WAL files.
+  wal-dir = "/var/lib/influxdb/wal"
+
+  # Trace logging provides more verbose output around the tsm engine. Turning
+  # this on can provide more useful output for debugging tsm engine issues.
+  # trace-logging-enabled = false
+
+  # Whether queries should be logged before execution. Very useful for troubleshooting, but will
+  # log any sensitive data contained within a query.
+  # query-log-enabled = true
+
+  # Settings for the TSM engine
+
+  # CacheMaxMemorySize is the maximum size a shard's cache can
+  # reach before it starts rejecting writes.
+  # cache-max-memory-size = 1048576000
+
+  # CacheSnapshotMemorySize is the size at which the engine will
+  # snapshot the cache and write it to a TSM file, freeing up memory
+  # cache-snapshot-memory-size = 26214400
+
+  # CacheSnapshotWriteColdDuration is the length of time at
+  # which the engine will snapshot the cache and write it to
+  # a new TSM file if the shard hasn't received writes or deletes
+  # cache-snapshot-write-cold-duration = "10m"
+
+  # CompactFullWriteColdDuration is the duration at which the engine
+  # will compact all TSM files in a shard if it hasn't received a
+  # write or delete
+  # compact-full-write-cold-duration = "4h"
+
+  # The maximum series allowed per database before writes are dropped. This limit can prevent
+  # high cardinality issues at the database level. This limit can be disabled by setting it to
+  # 0.
+  # max-series-per-database = 1000000
+
+  # The maximum number of tag values per tag that are allowed before writes are dropped. This limit
+  # can prevent high cardinality tag values from being written to a measurement. This limit can be
+  # disabled by setting it to 0.
+  # max-values-per-tag = 100000
+
+###
+### [coordinator]
+###
+### Controls the clustering service configuration.
+###
+
+[coordinator]
+  # The default time a write request will wait until a "timeout" error is returned to the caller.
+  # write-timeout = "10s"
+
+  # The maximum number of concurrent queries allowed to be executing at one time. If a query is
+  # executed and exceeds this limit, an error is returned to the caller. This limit can be disabled
+  # by setting it to 0.
+  # max-concurrent-queries = 0
+
+  # The maximum time a query is allowed to execute before being killed by the system. This limit
+  # can help prevent runaway queries. Setting the value to 0 disables the limit.
+  # query-timeout = "0s"
+
+  # The time threshold when a query will be logged as a slow query. This limit can be set to help
+  # discover slow or resource intensive queries. Setting the value to 0 disables the slow query logging.
+  # log-queries-after = "0s"
+
+  # The maximum number of points a SELECT can process. A value of 0 will make the maximum
+  # point count unlimited.
+  # max-select-point = 0
+
+  # The maximum number of series a SELECT can run. A value of 0 will make the maximum series
+  # count unlimited.
+  # max-select-series = 0
+
+  # The maximum number of group by time buckets a SELECT can create. A value of zero will make the
+  # maximum number of buckets unlimited.
+  # max-select-buckets = 0
+
+###
+### [retention]
+###
+### Controls the enforcement of retention policies for evicting old data.
+###
+
+[retention]
+  # Determines whether retention policy enforcement is enabled.
+  # enabled = true
+
+  # The interval of time when retention policy enforcement checks run.
+  # check-interval = "30m"
+
+###
+### [shard-precreation]
+###
+### Controls the precreation of shards, so they are available before data arrives.
+### Only shards that, after creation, will have both a start- and end-time in the
+### future, will ever be created. Shards are never precreated that would be wholly
+### or partially in the past.
+
+[shard-precreation]
+  # Determines whether shard pre-creation service is enabled.
+  # enabled = true
+
+  # The interval of time when the check to pre-create new shards runs.
+  # check-interval = "10m"
+
+  # The default period ahead of the endtime of a shard group that its successor
+  # group is created.
+  # advance-period = "30m"
+
+###
+### Controls the system self-monitoring, statistics and diagnostics.
+###
+### The internal database for monitoring data is created automatically
+### if it does not already exist. The target retention within this database
+### is called 'monitor' and is also created with a retention period of 7 days
+### and a replication factor of 1, if it does not exist. In all cases
+### this retention policy is configured as the default for the database.
+
+[monitor]
+  # Whether to record statistics internally.
+  # store-enabled = true
+
+  # The destination database for recorded statistics
+  # store-database = "_internal"
+
+  # The interval at which to record statistics
+  # store-interval = "10s"
+
+###
+### [admin]
+###
+### Controls the availability of the built-in, web-based admin interface. If HTTPS is
+### enabled for the admin interface, HTTPS must also be enabled on the [http] service.
+###
+### NOTE: This interface is deprecated as of 1.1.0 and will be removed in a future release.
+
+[admin]
+  # Determines whether the admin service is enabled.
+  enabled = true
+
+  # The default bind address used by the admin service.
+  bind-address = ":8083"
+
+  # Whether the admin service should use HTTPS.
+  # https-enabled = false
+
+  # The SSL certificate used when HTTPS is enabled.
+  # https-certificate = "/etc/ssl/influxdb.pem"
+
+###
+### [http]
+###
+### Controls how the HTTP endpoints are configured. These are the primary
+### mechanism for getting data into and out of InfluxDB.
+###
+
+[http]
+  # Determines whether HTTP endpoint is enabled.
+  # enabled = true
+
+  # The bind address used by the HTTP service.
+  # bind-address = ":8086"
+
+  # Determines whether HTTP authentication is enabled.
+  # auth-enabled = false
+
+  # The default realm sent back when issuing a basic auth challenge.
+  # realm = "InfluxDB"
+
+  # Determines whether HTTP request logging is enabled.
+  # log-enabled = true
+
+  # Determines whether detailed write logging is enabled.
+  # write-tracing = false
+
+  # Determines whether the pprof endpoint is enabled. This endpoint is used for
+  # troubleshooting and monitoring.
+  # pprof-enabled = true
+
+  # Determines whether HTTPS is enabled.
+  # https-enabled = false
+
+  # The SSL certificate to use when HTTPS is enabled.
+  # https-certificate = "/etc/ssl/influxdb.pem"
+
+  # Use a separate private key location.
+  # https-private-key = ""
+
+  # The JWT auth shared secret to validate requests using JSON web tokens.
+  # shared-secret = ""
+
+  # The default chunk size for result sets that should be chunked.
+  # max-row-limit = 10000
+
+  # The maximum number of HTTP connections that may be open at once. New connections that
+  # would exceed this limit are dropped. Setting this value to 0 disables the limit.
+  # max-connection-limit = 0
+
+  # Enable http service over unix domain socket
+  # unix-socket-enabled = false
+
+  # The path of the unix domain socket.
+  # bind-socket = "/var/run/influxdb.sock"
+
+###
+### [subscriber]
+###
+### Controls the subscriptions, which can be used to fork a copy of all data
+### received by the InfluxDB host.
+###
+
+[subscriber]
+  # Determines whether the subscriber service is enabled.
+  # enabled = true
+
+  # The default timeout for HTTP writes to subscribers.
+  # http-timeout = "30s"
+
+  # Allows insecure HTTPS connections to subscribers.
This is useful when testing with self- + # signed certificates. + # insecure-skip-verify = false + + # The path to the PEM encoded CA certs file. If the empty string, the default system certs will be used + # ca-certs = "" + + # The number of writer goroutines processing the write channel. + # write-concurrency = 40 + + # The number of in-flight writes buffered in the write channel. + # write-buffer-size = 1000 + + +### +### [[graphite]] +### +### Controls one or many listeners for Graphite data. +### + +[[graphite]] + # Determines whether the graphite endpoint is enabled. + # enabled = false + # database = "graphite" + # retention-policy = "" + # bind-address = ":2003" + # protocol = "tcp" + # consistency-level = "one" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # udp-read-buffer = 0 + + ### This string joins multiple matching 'measurement' values providing more control over the final measurement name. + # separator = "." + + ### Default tags that will be added to all metrics. These can be overridden at the template level + ### or by tags extracted from metric + # tags = ["region=us-east", "zone=1c"] + + ### Each template line requires a template pattern. It can have an optional + ### filter before the template and separated by spaces. It can also have optional extra + ### tags following the template. Multiple tags should be separated by commas and no spaces + ### similar to the line protocol format. There can be only one default template. + # templates = [ + # "*.app env.service.resource.measurement", + # # Default template + # "server.*", + # ] + +### +### [collectd] +### +### Controls one or many listeners for collectd data. +### + +[[collectd]] + # enabled = false + # bind-address = ":25826" + # database = "collectd" + # retention-policy = "" + # + # The collectd service supports either scanning a directory for multiple types + # db files, or specifying a single db file. + # typesdb = "/usr/local/share/collectd" + # + # security-level = "none" + # auth-file = "/etc/collectd/auth_file" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "10s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + +### +### [opentsdb] +### +### Controls one or many listeners for OpenTSDB data. +### + +[[opentsdb]] + # enabled = false + # bind-address = ":4242" + # database = "opentsdb" + # retention-policy = "" + # consistency-level = "one" + # tls-enabled = false + # certificate= "/etc/ssl/influxdb.pem" + + # Log an error for every malformed point. + # log-point-errors = true + + # These next lines control how batching works. 
You should have this enabled + # otherwise you could get dropped metrics or poor performance. Only points + # metrics received over the telnet protocol undergo batching. + + # Flush if this many points get buffered + # batch-size = 1000 + + # Number of batches that may be pending in memory + # batch-pending = 5 + + # Flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + +### +### [[udp]] +### +### Controls the listeners for InfluxDB line protocol data via UDP. +### + +[[udp]] + # enabled = false + # bind-address = ":8089" + # database = "udp" + # retention-policy = "" + + # These next lines control how batching works. You should have this enabled + # otherwise you could get dropped metrics or poor performance. Batching + # will buffer points in memory if you have many coming in. + + # Flush if this many points get buffered + # batch-size = 5000 + + # Number of batches that may be pending in memory + # batch-pending = 10 + + # Will flush at least this often even if we haven't hit buffer limit + # batch-timeout = "1s" + + # UDP Read buffer size, 0 means OS default. UDP listener will fail if set above OS max. + # read-buffer = 0 + +### +### [continuous_queries] +### +### Controls how continuous queries are run within InfluxDB. +### + +[continuous_queries] + # Determiens whether the continuous query service is enabled. + # enabled = true + + # Controls whether queries are logged when executed by the CQ service. + # log-enabled = true + + # interval for how often continuous queries will be checked if they need to run + # run-interval = "1s" diff --git a/influxdb/init.sh b/influxdb/init.sh new file mode 100755 index 0000000..23e61d5 --- /dev/null +++ b/influxdb/init.sh @@ -0,0 +1,235 @@ +#!/bin/bash +### BEGIN INIT INFO +# Provides: influxd +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start the InfluxDB process +### END INIT INFO + +# If you modify this, please make sure to also edit influxdb.service + +# Command-line options that can be set in /etc/default/influxdb. These will override +# any config file values. +DEFAULT=/etc/default/influxdb + +# Daemon options +INFLUXD_OPTS= + +# Process name ( For display ) +NAME=influxdb + +# User and group +USER=influxdb +GROUP=influxdb + +# Check for sudo or root privileges before continuing +if [ "$UID" != "0" ]; then + echo "You must be root to run this script" + exit 1 +fi + +# Daemon name, where is the actual executable If the daemon is not +# there, then exit. +DAEMON=/usr/bin/influxd +if [ ! -x $DAEMON ]; then + echo "Executable $DAEMON does not exist!" + exit 5 +fi + +# Configuration file +CONFIG=/etc/influxdb/influxdb.conf + +# PID file for the daemon +PIDFILE=/var/run/influxdb/influxd.pid +PIDDIR=`dirname $PIDFILE` +if [ ! -d "$PIDDIR" ]; then + mkdir -p $PIDDIR + chown $USER:$GROUP $PIDDIR +fi + +# Max open files +OPEN_FILE_LIMIT=65536 + +if [ -r /lib/lsb/init-functions ]; then + source /lib/lsb/init-functions +fi + +# Logging +if [ -z "$STDOUT" ]; then + STDOUT=/dev/null +fi + +if [ ! -f "$STDOUT" ]; then + mkdir -p $(dirname $STDOUT) +fi + +if [ -z "$STDERR" ]; then + STDERR=/var/log/influxdb/influxd.log +fi + +if [ ! 
-f "$STDERR" ]; then + mkdir -p $(dirname $STDERR) +fi + +# Override init script variables with DEFAULT values +if [ -r $DEFAULT ]; then + source $DEFAULT +fi + +function log_failure_msg() { + echo "$@" "[ FAILED ]" +} + +function log_success_msg() { + echo "$@" "[ OK ]" +} + +function start() { + # Check if config file exist + if [ ! -r $CONFIG ]; then + log_failure_msg "config file $CONFIG doesn't exist (or you don't have permission to view)" + exit 4 + fi + + # Check that the PID file exists, and check the actual status of process + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 "$PID" &>/dev/null; then + # Process is already up + log_success_msg "$NAME process is already running" + return 0 + fi + else + su -s /bin/sh -c "touch $PIDFILE" $USER &>/dev/null + if [ $? -ne 0 ]; then + log_failure_msg "$PIDFILE not writable, check permissions" + exit 5 + fi + fi + + # Bump the file limits, if required, before launching the daemon. These will + # carry over to launched processes. + if [ `ulimit -n` -lt $OPEN_FILE_LIMIT ]; then + ulimit -n $OPEN_FILE_LIMIT + if [ $? -ne 0 ]; then + log_failure_msg "Unable to set ulimit to $OPEN_FILE_LIMIT" + exit 1 + fi + fi + + # Launch process + echo "Starting $NAME..." + if which start-stop-daemon &>/dev/null; then + start-stop-daemon \ + --chuid $USER:$GROUP \ + --start \ + --quiet \ + --pidfile $PIDFILE \ + --exec $DAEMON \ + -- \ + -pidfile $PIDFILE \ + -config $CONFIG \ + $INFLUXD_OPTS >>$STDOUT 2>>$STDERR & + else + local CMD="$DAEMON -pidfile $PIDFILE -config $CONFIG $INFLUXD_OPTS >>$STDOUT 2>>$STDERR &" + su -s /bin/sh -c "$CMD" $USER + fi + + # Sleep to verify process is still up + sleep 1 + if [ -f $PIDFILE ]; then + # PIDFILE exists + if kill -0 $(cat $PIDFILE) &>/dev/null; then + # PID up, service running + log_success_msg "$NAME process was started" + return 0 + fi + fi + log_failure_msg "$NAME process was unable to start" + exit 1 +} + +function stop() { + # Stop the daemon. + if [ -f $PIDFILE ]; then + local PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + echo "Stopping $NAME..." + # Process still up, send SIGTERM and remove PIDFILE + kill -s TERM $PID &>/dev/null && rm -f "$PIDFILE" &>/dev/null + n=0 + while true; do + # Enter loop to ensure process is stopped + kill -0 $PID &>/dev/null + if [ "$?" != "0" ]; then + # Process stopped, break from loop + log_success_msg "$NAME process was stopped" + return 0 + fi + + # Process still up after signal, sleep and wait + sleep 1 + n=$(expr $n + 1) + if [ $n -eq 30 ]; then + # After 30 seconds, send SIGKILL + echo "Timeout exceeded, sending SIGKILL..." + kill -s KILL $PID &>/dev/null + elif [ $? -eq 40 ]; then + # After 40 seconds, error out + log_failure_msg "could not stop $NAME process" + exit 1 + fi + done + fi + fi + log_success_msg "$NAME process already stopped" +} + +function restart() { + # Restart the daemon. + stop + start +} + +function status() { + # Check the status of the process. + if [ -f $PIDFILE ]; then + PID="$(cat $PIDFILE)" + if kill -0 $PID &>/dev/null; then + log_success_msg "$NAME process is running" + exit 0 + fi + fi + log_failure_msg "$NAME process is not running" + exit 1 +} + +case $1 in + start) + start + ;; + + stop) + stop + ;; + + restart) + restart + ;; + + status) + status + ;; + + version) + $DAEMON version + ;; + + *) + # For invalid arguments, print the usage message. 
+ echo "Usage: $0 {start|stop|restart|status|version}" + exit 2 + ;; +esac diff --git a/ssh/id_rsa b/ssh/id_rsa new file mode 100755 index 0000000..a0f959b --- /dev/null +++ b/ssh/id_rsa @@ -0,0 +1,51 @@ +-----BEGIN RSA PRIVATE KEY----- +MIIJKQIBAAKCAgEA0qtihbe5Vg532k3MqULzP1JaRt7PXPQAxwks9XXHUOO3H/4w +Ij7Xoe4ydHabLEH/5a/RrOdECNm7/kL3xBgPqcfOfYZfgdl0okI88IIC0M8mBRJG +4FIt6shIxQi2uNbaUsSIyPP/mFhi1BHhU+Kt0SJTZ8nmpuIAR64bGKLIaOMno87E +oZcu1CrhrHxZ9uw3KtHh/oGNZU7fJMiSBdXrimcKvqLHaK9zgMWD+qg1QEymK1K6 +e8Itw7+HQKimo2JJKgtNUYV55CaQRhA4MscSa4hYc4ap7q7sEyaRJU7+241bqOol +jnSmoKrmIb4BKIbKI6j0rVCvu2FnQ6VOpoGL/QC5Sr/ZhpmFuL7G4SJ4IrQu3WgU +UaPEeauYrLfqsKuR2r1Apav0VdwbjuaDLcDfO/ftwlXPftpg7qsMz6eIJzCnkEPD +fUTV4BmgLvxpJRd/EYk9aW0LrxB3k+gDnd36Xt37kvkx1goMrVADtszMDwTZyTHD +9EtHlH1F+8/JNDkyDH9HTFnyL7K7pugf5iQA0cNDKBW6ypavTlEUBsZ/S+CJB6Rn +8hbwTQSLC0GRlH8a50AMZFmUMn0IBp+3dwGiztN6m48DgGYmvumJlxW/We5EcHvw +duJY1tdzNPvUXM7BTwoTp98KOlNiHxaIN+P3EcWXSeiC4lGiyB/QvHMLpvUCAwEA +AQKCAgEApLVYXyGIpu7C6Awhskhk9CIH8Rbs20jufq7+1p6SCf0vxLSwn2Wwo6sf +CY1dvtmPH945ucxKlfzeDWoAWvFQ9bYBDIUNkPEaTm4pRQTw/J6opBhpD6NQwRp8 +QoiQi613nnSIWUzjjhG0mweruzEkIrmKqj/htKJIA8Ls7nL+63qGGH1loM0Z0MoC +DSBgGj0O2Mylu51Alm926URjXmqlMF1luix8aa2bSZejm0r971q+JGsW4AGuVDgf +n/774gvLFjMl7VHFPgqkjBLMTW71SNueBBzKKZHbgxGJM7kyayd2/XarjFhJmp5j +NQhf0Xao/f5v5j2KV59M5EePWYE5l1ZMUZY/OmIN028iNwrew7BpnN4+W4nb1vBg +Jx32A8q3dXMeZUAb70BB3aHYyoM8432VPjIBrq/hDae4xCNuy+7L0ZiHLzYzHJ8r +w511EPPiEt+VMedlhLdEoMYEplk0//zgc7tS9yvc0JFjuy46kWDR4rMadZ78ntfT +VAMK8vtlsUBOtRhz/koAEWrbNbW718mLnR2UdAwgYyaU+luuSgt/RzOn/YfvuIp2 +fghcN6QQfJEdohLGI3VK9kEJhBJH1TqPQjWCc5FKS28+ptdwvHtCrpKHWAFFSREB +XazKNvXnGGWL4LO0vpQXQ8bWq4OvPvm1WHbq3by76KzefhnIvLUCggEBAPrcleiH +QWaK2VbmvjXqPFjPPGDGHq3cwiKTW9sT0GjVALzNONLCripJEbee29DDvNl/Nz3Z +6tyqS7pS/0AJCUqUkwUV2ou1YbWwBhhNnAGX9eJVlOMYpONIIxXZDbCCpjER1SfU +ChLILPnnrIXdR5FgAApDxY0Szc4UGyZOpLqBl5/L3DqARamcujqV8UJVaCy+ivLA +Xhx12C8iXR43XjW21DGV51BgfCnIBjPvgKVbSBIBCBTRrgWYQtd5LnkM5gtN/qkd +Zaemop58tV8LH4FA80ov24rlji4dxXERkICm280/pfIYLRLBcp/jxxSIk9vZ8ToE +X5VqImUnNGdpFg8CggEBANb8DFEPP4rDFnWDC+Qi/Qx7bSsztdhEDXf8/+8I60xy +9P+RPFKmiMMg4oR1pw7U/drf5enFJDp36+0FhGGWHMlfTdyYFVbD5m9JnNd0K8HE +R+AAcvSKg4j+YLwn6dM1rPwmhvIc6y7XH1qt91TrGAhiz4eNP79wYvVFRgxGEL1c ++M1vpmK8/5BzcCSj2VkH6M3alwuZInxkAK6la80ZhSXgMRsKl9QcNiV+pPPC9mWI +Cn13xuKmBJtWTnJTLABUcQ5g2/dWVq4GD1NJ4oHziQ+anWD3gd94Y2UHUmIq2uS1 +VKcXQUtcgl57DZGnj7/8DPv6qKoUtT+u00fS6Pmo1rsCggEBALY5whNVHObV+aas +moMjkY6Ziyxo/iQhdkwTaugq7nsYLx72nE5M6KMCzQHrPZXaebYsOP6O65I7iiLg +41WqursYTeulwkk2CrImGWdSCP8HWa99p1eHsU8AOaxHLTCI1xxR7dquplqaWkF2 +TtY2tSQe7iiGrlwZyrHl3QrombR5KqEDPwin4PKiTIz+GnCrBa9rAhfwMOzPlag3 +eBjY83BlTKJwV8UFMWxQRmE3muWC90lMwogNADJENlm7R0zbV8fichh6qDsuSnaB +9GYD+HC1s4SyWvH8almJesD1l1g83PGtSGghs6wqJY+LsDN0OqO7HwWb46rvm+iK +oAB2T3UCggEAB1728SSLsi9EKt1GYBkPAgmNUajhODwFFjMXd1Of9awPWF/baJ4L +HF2ZRp9eDfUhiwqYMG6w8kxAS40f39Sswf7Bham1OZdX5ELq3Lw/79HSAQ0i492H +6v+Fw42+x54Ug0Sy5cQ2EBISIiLK0x+IYGk64lPmdqLc0boBT9RrKILyiOCmHMaM +IKDEnIz9m77YrY4sFZq3gvpOx64mRw7z8LuCE4JGeGBLPvfbofvyRMLoUSr1J7o3 +eDf6UpMldBFxGuQZRp6wNCFDD0D+QiRec6mCcduqmUXgaotYuMLtZGAPOfdjJrlS +QvkHGxJarGNw8obCl6pEGJ3e4pyJnl26lwKCAQBwH+pfoVNsSIj7u1G1CpY96gmi +FNuLQRJfcOQBi7sIPBEU4cWIY6HPloZFSs+8KDQlaPcjaRkyZdsmKnwKS5Z7/Q53 +YzShg3scuCXAFNCWGwdy9/nXj3UZ4bjrYzwHPpjiW4KQTILyMeBkqFF/g2odJ4SU +ntczpPYayTzEvY0lJsgqsKR4cquhLTIiEIFYT1TwjBaRK/kJnNU/oMmByxDsHFda +D7KiCd78OkEc6OsmP/u5vnMSi8la1UUuugkHgg7J00xlDwvOM9KLdMoBrrhUmwXE +XpNU5Jj/fozAeiecG9eXIJGdxzZ7m+RU6EZ+AMIZoNmpNEqfYvFeh/cHWmlr +-----END RSA PRIVATE KEY----- diff --git a/ssh/id_rsa.pub b/ssh/id_rsa.pub new file 
mode 100755 index 0000000..114173d --- /dev/null +++ b/ssh/id_rsa.pub @@ -0,0 +1 @@ +ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAACAQDSq2KFt7lWDnfaTcypQvM/UlpG3s9c9ADHCSz1dcdQ47cf/jAiPteh7jJ0dpssQf/lr9Gs50QI2bv+QvfEGA+px859hl+B2XSiQjzwggLQzyYFEkbgUi3qyEjFCLa41tpSxIjI8/+YWGLUEeFT4q3RIlNnyeam4gBHrhsYosho4yejzsShly7UKuGsfFn27Dcq0eH+gY1lTt8kyJIF1euKZwq+osdor3OAxYP6qDVATKYrUrp7wi3Dv4dAqKajYkkqC01RhXnkJpBGEDgyxxJriFhzhqnuruwTJpElTv7bjVuo6iWOdKagquYhvgEohsojqPStUK+7YWdDpU6mgYv9ALlKv9mGmYW4vsbhIngitC7daBRRo8R5q5ist+qwq5HavUClq/RV3BuO5oMtwN879+3CVc9+2mDuqwzPp4gnMKeQQ8N9RNXgGaAu/GklF38RiT1pbQuvEHeT6AOd3fpe3fuS+THWCgytUAO2zMwPBNnJMcP0S0eUfUX7z8k0OTIMf0dMWfIvsrum6B/mJADRw0MoFbrKlq9OURQGxn9L4IkHpGfyFvBNBIsLQZGUfxrnQAxkWZQyfQgGn7d3AaLO03qbjwOAZia+6YmXFb9Z7kRwe/B24ljW13M0+9RczsFPChOn3wo6U2IfFog34/cRxZdJ6ILiUaLIH9C8cwum9Q== samuele@Dell-XPS-13 diff --git a/supervisord/supervisord.conf b/supervisord/supervisord.conf new file mode 100755 index 0000000..06a575a --- /dev/null +++ b/supervisord/supervisord.conf @@ -0,0 +1,22 @@ +[supervisord] +nodaemon = true + +[program:sshd] +priority = 1 +command = /usr/sbin/sshd -D + +[program:influxdb] +priority = 3 +command = bash -c "/etc/init.d/influxdb start && sleep 5" + +[program:telegraf] +priority = 4 +command = bash -c "sleep 30 && /etc/init.d/telegraf start" + +[program:grafana] +priority = 5 +command = /etc/init.d/grafana-server start + +[program:chronograf] +priority = 6 +command = /usr/bin/chronograf -r diff --git a/system/99fixbadproxy b/system/99fixbadproxy new file mode 100755 index 0000000..f2901b0 --- /dev/null +++ b/system/99fixbadproxy @@ -0,0 +1,4 @@ +Acquire::http::Pipeline-Depth "0"; +Acquire::http::No-Cache=True; +Acquire::BrokenProxy=true; + diff --git a/telegraf/init.sh b/telegraf/init.sh new file mode 100755 index 0000000..1994deb --- /dev/null +++ b/telegraf/init.sh @@ -0,0 +1,208 @@ +#! /usr/bin/env bash + +# chkconfig: 2345 99 01 +# description: Telegraf daemon + +### BEGIN INIT INFO +# Provides: telegraf +# Required-Start: $all +# Required-Stop: $remote_fs $syslog +# Default-Start: 2 3 4 5 +# Default-Stop: 0 1 6 +# Short-Description: Start telegraf at boot time +### END INIT INFO + +# this init script supports three different variations: +# 1. New lsb that define start-stop-daemon +# 2. Old lsb that don't have start-stop-daemon but define, log, pidofproc and killproc +# 3. Centos installations without lsb-core installed +# +# In the third case we have to define our own functions which are very dumb +# and expect the args to be positioned correctly. + +# Command-line options that can be set in /etc/default/telegraf. These will override +# any config file values. +TELEGRAF_OPTS= + +USER=telegraf +GROUP=telegraf + +if [ -r /lib/lsb/init-functions ]; then + source /lib/lsb/init-functions +fi + +DEFAULT=/etc/default/telegraf + +if [ -r $DEFAULT ]; then + source $DEFAULT +fi + +if [ -z "$STDOUT" ]; then + STDOUT=/dev/null +fi +if [ ! -f "$STDOUT" ]; then + mkdir -p `dirname $STDOUT` +fi + +if [ -z "$STDERR" ]; then + STDERR=/var/log/telegraf/telegraf.log +fi +if [ ! -f "$STDERR" ]; then + mkdir -p `dirname $STDERR` +fi + +OPEN_FILE_LIMIT=65536 + +function pidofproc() { + if [ $# -ne 3 ]; then + echo "Expected three arguments, e.g. $0 -p pidfile daemon-name" + fi + + if [ ! -f "$2" ]; then + return 1 + fi + + local pidfile=`cat $2` + + if [ "x$pidfile" == "x" ]; then + return 1 + fi + + if ps --pid "$pidfile" | grep -q $(basename $3); then + return 0 + fi + + return 1 +} + +function killproc() { + if [ $# -ne 3 ]; then + echo "Expected three arguments, e.g. 
$0 -p pidfile signal" + fi + + pid=`cat $2` + + kill -s $3 $pid +} + +function log_failure_msg() { + echo "$@" "[ FAILED ]" +} + +function log_success_msg() { + echo "$@" "[ OK ]" +} + +# Process name ( For display ) +name=telegraf + +# Daemon name, where is the actual executable +daemon=/usr/bin/telegraf + +# pid file for the daemon +pidfile=/var/run/telegraf/telegraf.pid +piddir=`dirname $pidfile` + +if [ ! -d "$piddir" ]; then + mkdir -p $piddir + chown $USER:$GROUP $piddir +fi + +# Configuration file +config=/etc/telegraf/telegraf.conf +confdir=/etc/telegraf/telegraf.d + +# If the daemon is not there, then exit. +[ -x $daemon ] || exit 5 + +case $1 in + start) + # Checked the PID file exists and check the actual status of process + if [ -e $pidfile ]; then + pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" + # If the status is SUCCESS then don't need to start again. + if [ "x$status" = "x0" ]; then + log_failure_msg "$name process is running" + exit 0 # Exit + fi + fi + + # Bump the file limits, before launching the daemon. These will carry over to + # launched processes. + ulimit -n $OPEN_FILE_LIMIT + if [ $? -ne 0 ]; then + log_failure_msg "set open file limit to $OPEN_FILE_LIMIT" + fi + + log_success_msg "Starting the process" "$name" + if which start-stop-daemon > /dev/null 2>&1; then + start-stop-daemon --chuid $USER:$GROUP --start --quiet --pidfile $pidfile --exec $daemon -- -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR & + else + su -s /bin/sh -c "nohup $daemon -pidfile $pidfile -config $config -config-directory $confdir $TELEGRAF_OPTS >>$STDOUT 2>>$STDERR &" $USER + fi + log_success_msg "$name process was started" + ;; + + stop) + # Stop the daemon. + if [ -e $pidfile ]; then + pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" + if [ "$status" = 0 ]; then + if killproc -p $pidfile SIGTERM && /bin/rm -rf $pidfile; then + log_success_msg "$name process was stopped" + else + log_failure_msg "$name failed to stop service" + fi + fi + else + log_failure_msg "$name process is not running" + fi + ;; + + reload) + # Reload the daemon. + if [ -e $pidfile ]; then + pidofproc -p $pidfile $daemon > /dev/null 2>&1 && status="0" || status="$?" + if [ "$status" = 0 ]; then + if killproc -p $pidfile SIGHUP; then + log_success_msg "$name process was reloaded" + else + log_failure_msg "$name failed to reload service" + fi + fi + else + log_failure_msg "$name process is not running" + fi + ;; + + restart) + # Restart the daemon. + $0 stop && sleep 2 && $0 start + ;; + + status) + # Check the status of the process. + if [ -e $pidfile ]; then + if pidofproc -p $pidfile $daemon > /dev/null; then + log_success_msg "$name Process is running" + exit 0 + else + log_failure_msg "$name Process is not running" + exit 1 + fi + else + log_failure_msg "$name Process is not running" + exit 3 + fi + ;; + + version) + $daemon version + ;; + + *) + # For invalid arguments, print the usage message. + echo "Usage: $0 {start|stop|restart|status|version}" + exit 2 + ;; +esac diff --git a/telegraf/telegraf.conf b/telegraf/telegraf.conf new file mode 100755 index 0000000..064409f --- /dev/null +++ b/telegraf/telegraf.conf @@ -0,0 +1,2069 @@ +# Telegraf Configuration +# +# Telegraf is entirely plugin driven. All metrics are gathered from the +# declared inputs, and sent to the declared outputs. +# +# Plugins must be declared in here to be active. +# To deactivate a plugin, comment out the name and any variables. 
+# +# Use 'telegraf -config telegraf.conf -test' to see what metrics a config +# file would generate. +# +# Environment variables can be used anywhere in this config file, simply prepend +# them with $. For strings the variable must be within quotes (ie, "$STR_VAR"), +# for numbers and booleans they should be plain (ie, $INT_VAR, $BOOL_VAR) + + +# Global tags can be specified here in key="value" format. +[global_tags] + # dc = "us-east-1" # will tag all metrics with dc=us-east-1 + # rack = "1a" + ## Environment variables can be used as tags, and throughout the config file + # user = "$USER" + + +# Configuration for telegraf agent +[agent] + ## Default data collection interval for all inputs + interval = "1s" + ## Rounds collection interval to 'interval' + ## ie, if interval="10s" then always collect on :00, :10, :20, etc. + round_interval = true + + ## Telegraf will send metrics to outputs in batches of at most + ## metric_batch_size metrics. + ## This controls the size of writes that Telegraf sends to output plugins. + metric_batch_size = 1000 + + ## For failed writes, telegraf will cache metric_buffer_limit metrics for each + ## output, and will flush this buffer on a successful write. Oldest metrics + ## are dropped first when this buffer fills. + ## This buffer only fills when writes fail to output plugin(s). + metric_buffer_limit = 10000 + + ## Collection jitter is used to jitter the collection by a random amount. + ## Each plugin will sleep for a random time within jitter before collecting. + ## This can be used to avoid many plugins querying things like sysfs at the + ## same time, which can have a measurable effect on the system. + collection_jitter = "0s" + + ## Default flushing interval for all outputs. You shouldn't set this below + ## interval. Maximum flush_interval will be flush_interval + flush_jitter + flush_interval = "10s" + ## Jitter the flush interval by a random amount. This is primarily to avoid + ## large write spikes for users running a large number of telegraf instances. + ## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s + flush_jitter = "0s" + + ## By default, precision will be set to the same timestamp order as the + ## collection interval, with the maximum being 1s. + ## Precision will NOT be used for service inputs, such as logparser and statsd. + ## Valid values are "ns", "us" (or "µs"), "ms", "s". + precision = "" + + ## Logging configuration: + ## Run telegraf with debug log messages. + debug = false + ## Run telegraf in quiet mode (error log messages only). + quiet = false + ## Specify the log file name. The empty string means to log to stderr. + logfile = "" + + ## Override default hostname, if empty use os.Hostname() + hostname = "" + ## If set to true, do no set the "host" tag in the telegraf agent. + omit_hostname = false + + +############################################################################### +# OUTPUT PLUGINS # +############################################################################### + +# Configuration for influxdb server to send metrics to +[[outputs.influxdb]] + ## The full HTTP or UDP endpoint URL for your InfluxDB instance. + ## Multiple urls can be specified as part of the same cluster, + ## this means that only ONE of the urls will be written to each interval. + # urls = ["udp://localhost:8089"] # UDP endpoint example + urls = ["http://localhost:8086"] # required + ## The target database for metrics (telegraf will create it if not exists). 
+ database = "telegraf" # required + + ## Retention policy to write to. Empty string writes to the default rp. + retention_policy = "" + ## Write consistency (clusters only), can be: "any", "one", "quorum", "all" + write_consistency = "any" + + ## Write timeout (for the InfluxDB client), formatted as a string. + ## If not provided, will default to 5s. 0s means no timeout (not recommended). + timeout = "5s" + username = "telegraf" + password = "telegraf" + ## Set the user agent for HTTP POSTs (can be useful for log differentiation) + # user_agent = "telegraf" + ## Set UDP payload size, defaults to InfluxDB UDP Client default (512 bytes) + # udp_payload = 512 + + ## Optional SSL Config + # ssl_ca = "/etc/telegraf/ca.pem" + # ssl_cert = "/etc/telegraf/cert.pem" + # ssl_key = "/etc/telegraf/key.pem" + ## Use SSL but skip chain & host verification + # insecure_skip_verify = false + + +# # Configuration for Amon Server to send metrics to. +# [[outputs.amon]] +# ## Amon Server Key +# server_key = "my-server-key" # required. +# +# ## Amon Instance URL +# amon_instance = "https://youramoninstance" # required +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Configuration for the AMQP server to send metrics to +# [[outputs.amqp]] +# ## AMQP url +# url = "amqp://localhost:5672/influxdb" +# ## AMQP exchange +# exchange = "telegraf" +# ## Auth method. PLAIN and EXTERNAL are supported +# # auth_method = "PLAIN" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, it's value will be used as the routing key +# routing_tag = "host" +# +# ## InfluxDB retention policy +# # retention_policy = "default" +# ## InfluxDB database +# # database = "telegraf" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for AWS CloudWatch output. +# [[outputs.cloudwatch]] +# ## Amazon REGION +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Namespace for the CloudWatch MetricDatums +# namespace = "InfluxData/Telegraf" + + +# # Configuration for DataDog API to send metrics to. +# [[outputs.datadog]] +# ## Datadog API key +# apikey = "my-secret-key" # required. +# +# ## Connection timeout. +# # timeout = "5s" + + +# # Send metrics to nowhere at all +# [[outputs.discard]] +# # no configuration + + +# # Send telegraf metrics to file(s) +# [[outputs.file]] +# ## Files to write to, "stdout" is a specially handled file. +# files = ["stdout", "/tmp/metrics.out"] +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for Graphite server to send metrics to +# [[outputs.graphite]] +# ## TCP endpoint for your graphite instance. +# ## If multiple endpoints are configured, output will be load balanced. +# ## Only one of the endpoints will be written to with each iteration. +# servers = ["localhost:2003"] +# ## Prefix metrics name +# prefix = "" +# ## Graphite output template +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# template = "host.tags.measurement.field" +# ## timeout in seconds for the write connection to graphite +# timeout = 2 + + +# # Send telegraf metrics to graylog(s) +# [[outputs.graylog]] +# ## UDP endpoint for your graylog instance. +# servers = ["127.0.0.1:12201", "192.168.1.1:12201"] + + +# # Configuration for sending metrics to an Instrumental project +# [[outputs.instrumental]] +# ## Project API Token (required) +# api_token = "API Token" # required +# ## Prefix the metrics with a given name +# prefix = "" +# ## Stats output template (Graphite formatting) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# template = "host.tags.measurement.field" +# ## Timeout in seconds to connect +# timeout = "2s" +# ## Display Communcation to Instrumental +# debug = false + + +# # Configuration for the Kafka server to send metrics to +# [[outputs.kafka]] +# ## URLs of kafka brokers +# brokers = ["localhost:9092"] +# ## Kafka topic for producer messages +# topic = "telegraf" +# ## Telegraf tag to use as a routing key +# ## ie, if this tag exists, it's value will be used as the routing key +# routing_tag = "host" +# +# ## CompressionCodec represents the various compression codecs recognized by +# ## Kafka in messages. +# ## 0 : No compression +# ## 1 : Gzip compression +# ## 2 : Snappy compression +# compression_codec = 0 +# +# ## RequiredAcks is used in Produce Requests to tell the broker how many +# ## replica acknowledgements it must see before responding +# ## 0 : the producer never waits for an acknowledgement from the broker. +# ## This option provides the lowest latency but the weakest durability +# ## guarantees (some data will be lost when a server fails). +# ## 1 : the producer gets an acknowledgement after the leader replica has +# ## received the data. This option provides better durability as the +# ## client waits until the server acknowledges the request as successful +# ## (only messages that were written to the now-dead leader but not yet +# ## replicated will be lost). +# ## -1: the producer gets an acknowledgement after all in-sync replicas have +# ## received the data. This option provides the best durability, we +# ## guarantee that no messages will be lost as long as at least one in +# ## sync replica remains. +# required_acks = -1 +# +# ## The total number of times to retry sending a message +# max_retry = 3 +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for the AWS Kinesis output. +# [[outputs.kinesis]] +# ## Amazon REGION of kinesis endpoint. +# region = "ap-southeast-2" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# ## Kinesis StreamName must exist prior to starting telegraf. +# streamname = "StreamName" +# ## PartitionKey as used for sharding data. +# partitionkey = "PartitionKey" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" +# +# ## debug will show upstream aws messages. +# debug = false + + +# # Configuration for Librato API to send metrics to. +# [[outputs.librato]] +# ## Librator API Docs +# ## http://dev.librato.com/v1/metrics-authentication +# ## Librato API user +# api_user = "telegraf@influxdb.com" # required. +# ## Librato API token +# api_token = "my-secret-token" # required. +# ## Debug +# # debug = false +# ## Connection timeout. +# # timeout = "5s" +# ## Output source Template (same as graphite buckets) +# ## see https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md#graphite +# ## This template is used in librato's source (not metric's name) +# template = "host" +# + + +# # Configuration for MQTT server to send metrics to +# [[outputs.mqtt]] +# servers = ["localhost:1883"] # required. +# +# ## MQTT outputs send metrics to this topic format +# ## "///" +# ## ex: prefix/web01.example.com/mem +# topic_prefix = "telegraf" +# +# ## username and password to connect MQTT server. +# # username = "telegraf" +# # password = "metricsmetricsmetricsmetrics" +# +# ## client ID, if not set a random ID is generated +# # client_id = "" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NATS +# [[outputs.nats]] +# ## URLs of NATS servers +# servers = ["nats://localhost:4222"] +# ## Optional credentials +# # username = "" +# # password = "" +# ## NATS subject for producer messages +# subject = "telegraf" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Data format to output. 
+# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Send telegraf measurements to NSQD +# [[outputs.nsq]] +# ## Location of nsqd instance listening on TCP +# server = "localhost:4150" +# ## NSQ topic for producer messages +# topic = "telegraf" +# +# ## Data format to output. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_OUTPUT.md +# data_format = "influx" + + +# # Configuration for OpenTSDB server to send metrics to +# [[outputs.opentsdb]] +# ## prefix for metrics keys +# prefix = "my.specific.prefix." +# +# ## DNS name of the OpenTSDB server +# ## Using "opentsdb.example.com" or "tcp://opentsdb.example.com" will use the +# ## telnet API. "http://opentsdb.example.com" will use the Http API. +# host = "opentsdb.example.com" +# +# ## Port of the OpenTSDB server +# port = 4242 +# +# ## Number of data points to send to OpenTSDB in Http requests. +# ## Not used with telnet API. +# httpBatchSize = 50 +# +# ## Debug true - Prints OpenTSDB communication +# debug = false + + +# # Configuration for the Prometheus client to spawn +# [[outputs.prometheus_client]] +# ## Address to listen on +# # listen = ":9126" +# +# ## Interval to expire metrics and not deliver to prometheus, 0 == no expiration +# # expiration_interval = "60s" + + +# # Configuration for the Riemann server to send metrics to +# [[outputs.riemann]] +# ## URL of server +# url = "localhost:5555" +# ## transport protocol to use either tcp or udp +# transport = "tcp" +# ## separator to use between input name and field name in Riemann service name +# separator = " " + + + +############################################################################### +# PROCESSOR PLUGINS # +############################################################################### + +# # Print all metrics that pass through this filter. +# [[processors.printer]] + + + +############################################################################### +# AGGREGATOR PLUGINS # +############################################################################### + +# # Keep the aggregate min/max of each metric passing through. +# [[aggregators.minmax]] +# ## General Aggregator Arguments: +# ## The period on which to flush & clear the aggregator. +# period = "30s" +# ## If true, the original metric will be dropped by the +# ## aggregator and will not get sent to the output plugins. +# drop_original = false + + + +############################################################################### +# INPUT PLUGINS # +############################################################################### + +# Read metrics about cpu usage +[[inputs.cpu]] + ## Whether to report per-cpu stats or not + percpu = true + ## Whether to report total system cpu stats or not + totalcpu = true + ## If true, collect raw CPU time metrics. + collect_cpu_time = false + + +# Read metrics about disk usage by mount point +[[inputs.disk]] + ## By default, telegraf gather stats for all mountpoints. + ## Setting mountpoints will restrict the stats to the specified mountpoints. + # mount_points = ["/"] + + ## Ignore some mountpoints by filesystem type. For example (dev)tmpfs (usually + ## present on /run, /var/run, /dev/shm or /dev). 
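+  ## (Illustrative note, not in the stock config: other pseudo-filesystems that
+  ## appear inside containers, such as "overlay" or "squashfs", could be added
+  ## to this list in the same way.)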
+ ignore_fs = ["tmpfs", "devtmpfs"] + + +# Read metrics about disk IO by device +[[inputs.diskio]] + ## By default, telegraf will gather stats for all devices including + ## disk partitions. + ## Setting devices will restrict the stats to the specified devices. + # devices = ["sda", "sdb"] + ## Uncomment the following line if you need disk serial numbers. + # skip_serial_number = false + + +# Get kernel statistics from /proc/stat +[[inputs.kernel]] + # no configuration + + +# Read metrics about memory usage +[[inputs.mem]] + # no configuration + + +# Get the number of processes and group them by status +[[inputs.processes]] + # no configuration + + +# Read metrics about swap memory usage +[[inputs.swap]] + # no configuration + + +# Read metrics about system load & uptime +[[inputs.system]] + # no configuration + + +# # Read stats from aerospike server(s) +# [[inputs.aerospike]] +# ## Aerospike servers to connect to (with port) +# ## This plugin will query all namespaces the aerospike +# ## server has configured and get stats for them. +# servers = ["localhost:3000"] + + +# # Read Apache status information (mod_status) +# [[inputs.apache]] +# ## An array of Apache status URI to gather stats. +# ## Default is "http://localhost/server-status?auto". +# urls = ["http://localhost/server-status?auto"] +# ## user credentials for basic HTTP authentication +# username = "myuser" +# password = "mypassword" +# +# ## Timeout to the complete conection and reponse time in seconds +# response_timeout = "25s" ## default to 5 seconds +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of bcache from stats_total and dirty_data +# [[inputs.bcache]] +# ## Bcache sets path +# ## If not specified, then default is: +# bcachePath = "/sys/fs/bcache" +# +# ## By default, telegraf gather stats for all bcache devices +# ## Setting devices will restrict the stats to the specified +# ## bcache devices. +# bcacheDevs = ["bcache0"] + + +# # Read Cassandra metrics through Jolokia +# [[inputs.cassandra]] +# # This is the context root used to compose the jolokia url +# context = "/jolokia/read" +# ## List of cassandra servers exposing jolokia read service +# servers = ["myuser:mypassword@10.10.10.1:8778","10.10.10.2:8778",":8778"] +# ## List of metrics collected on above servers +# ## Each metric consists of a jmx path. +# ## This will collect all heap memory usage metrics from the jvm and +# ## ReadLatency metrics for all keyspaces and tables. +# ## "type=Table" in the query works with Cassandra3.0. Older versions might +# ## need to use "type=ColumnFamily" +# metrics = [ +# "/java.lang:type=Memory/HeapMemoryUsage", +# "/org.apache.cassandra.metrics:type=Table,keyspace=*,scope=*,name=ReadLatency" +# ] + + +# # Collects performance metrics from the MON and OSD nodes in a Ceph storage cluster. +# [[inputs.ceph]] +# ## This is the recommended interval to poll. 
Too frequent and you will lose +# ## data points due to timeouts during rebalancing and recovery +# interval = '1m' +# +# ## All configuration values are optional, defaults are shown below +# +# ## location of ceph binary +# ceph_binary = "/usr/bin/ceph" +# +# ## directory in which to look for socket files +# socket_dir = "/var/run/ceph" +# +# ## prefix of MON and OSD socket files, used to determine socket type +# mon_prefix = "ceph-mon" +# osd_prefix = "ceph-osd" +# +# ## suffix used to identify socket files +# socket_suffix = "asok" +# +# ## Ceph user to authenticate as +# ceph_user = "client.admin" +# +# ## Ceph configuration to use to locate the cluster +# ceph_config = "/etc/ceph/ceph.conf" +# +# ## Whether to gather statistics via the admin socket +# gather_admin_socket_stats = true +# +# ## Whether to gather statistics via ceph commands +# gather_cluster_stats = true + + +# # Read specific statistics per cgroup +# [[inputs.cgroup]] +# ## Directories in which to look for files, globs are supported. +# ## Consider restricting paths to the set of cgroups you really +# ## want to monitor if you have a large number of cgroups, to avoid +# ## any cardinality issues. +# # paths = [ +# # "/cgroup/memory", +# # "/cgroup/memory/child1", +# # "/cgroup/memory/child2/*", +# # ] +# ## cgroup stat fields, as file names, globs are supported. +# ## these file names are appended to each path from above. +# # files = ["memory.*usage*", "memory.limit_in_bytes"] + + +# # Pull Metric Statistics from Amazon CloudWatch +# [[inputs.cloudwatch]] +# ## Amazon Region +# region = "us-east-1" +# +# ## Amazon Credentials +# ## Credentials are loaded in the following order +# ## 1) Assumed credentials via STS if role_arn is specified +# ## 2) explicit credentials from 'access_key' and 'secret_key' +# ## 3) shared profile from 'profile' +# ## 4) environment variables +# ## 5) shared credentials file +# ## 6) EC2 Instance Profile +# #access_key = "" +# #secret_key = "" +# #token = "" +# #role_arn = "" +# #profile = "" +# #shared_credential_file = "" +# +# # The minimum period for Cloudwatch metrics is 1 minute (60s). However not all +# # metrics are made available to the 1 minute period. Some are collected at +# # 3 minute and 5 minutes intervals. See https://aws.amazon.com/cloudwatch/faqs/#monitoring. +# # Note that if a period is configured that is smaller than the minimum for a +# # particular metric, that metric will not be returned by the Cloudwatch API +# # and will not be collected by Telegraf. +# # +# ## Requested CloudWatch aggregation Period (required - must be a multiple of 60s) +# period = "5m" +# +# ## Collection Delay (required - must account for metrics availability via CloudWatch API) +# delay = "5m" +# +# ## Recomended: use metric 'interval' that is a multiple of 'period' to avoid +# ## gaps or overlap in pulled data +# interval = "5m" +# +# ## Configure the TTL for the internal cache of metrics. +# ## Defaults to 1 hr if not specified +# #cache_ttl = "10m" +# +# ## Metric Statistic Namespace (required) +# namespace = "AWS/ELB" +# +# ## Maximum requests per second. Note that the global default AWS rate limit is +# ## 10 reqs/sec, so if you define multiple namespaces, these should add up to a +# ## maximum of 10. Optional - default value is 10. 
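+#   ## (e.g., a sketch: two [[inputs.cloudwatch]] namespaces could be given
+#   ## ratelimit = 5 each so that their combined request rate stays within
+#   ## the 10 req/s AWS default mentioned above.)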
+# ratelimit = 10 +# +# ## Metrics to Pull (optional) +# ## Defaults to all Metrics in Namespace if nothing is provided +# ## Refreshes Namespace available metrics every 1h +# #[[inputs.cloudwatch.metrics]] +# # names = ["Latency", "RequestCount"] +# # +# # ## Dimension filters for Metric (optional) +# # [[inputs.cloudwatch.metrics.dimensions]] +# # name = "LoadBalancerName" +# # value = "p-example" + + +# # Gather health check statuses from services registered in Consul +# [[inputs.consul]] +# ## Most of these values defaults to the one configured on a Consul's agent level. +# ## Optional Consul server address (default: "localhost") +# # address = "localhost" +# ## Optional URI scheme for the Consul server (default: "http") +# # scheme = "http" +# ## Optional ACL token used in every request (default: "") +# # token = "" +# ## Optional username used for request HTTP Basic Authentication (default: "") +# # username = "" +# ## Optional password used for HTTP Basic Authentication (default: "") +# # password = "" +# ## Optional data centre to query the health checks from (default: "") +# # datacentre = "" + + +# # Read metrics from one or many couchbase clusters +# [[inputs.couchbase]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## http://couchbase-0.example.com/ +# ## http://admin:secret@couchbase-0.example.com:8091/ +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no protocol is specifed, HTTP is used. +# ## If no port is specified, 8091 is used. +# servers = ["http://localhost:8091"] + + +# # Read CouchDB Stats from one or more servers +# [[inputs.couchdb]] +# ## Works with CouchDB stats endpoints out of the box +# ## Multiple HOSTs from which to read CouchDB stats: +# hosts = ["http://localhost:8086/_stats"] + + +# # Read metrics from one or many disque servers +# [[inputs.disque]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port and password. +# ## ie disque://localhost, disque://10.10.3.33:18832, 10.0.0.1:10000, etc. +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost"] + + +# # Query given DNS server and gives statistics +# [[inputs.dns_query]] +# ## servers to query +# servers = ["8.8.8.8"] # required +# +# ## Domains or subdomains to query. "."(root) is default +# domains = ["."] # optional +# +# ## Query record type. Default is "A" +# ## Posible values: A, AAAA, CNAME, MX, NS, PTR, TXT, SOA, SPF, SRV. +# record_type = "A" # optional +# +# ## Dns server port. 53 is default +# port = 53 # optional +# +# ## Query timeout in seconds. Default is 2 seconds +# timeout = 2 # optional + + +# # Read metrics about docker containers +# [[inputs.docker]] +# ## Docker Endpoint +# ## To use TCP, set endpoint = "tcp://[ip]:[port]" +# ## To use environment variables (ie, docker-machine), set endpoint = "ENV" +# endpoint = "unix:///var/run/docker.sock" +# ## Only collect metrics for these containers, collect all if empty +# container_names = [] +# ## Timeout for docker list, info, and stats commands +# timeout = "5s" +# +# ## Whether to report for each container per-device blkio (8:0, 8:1...) and +# ## network (eth0, eth1, ...) stats or not +# perdevice = true +# ## Whether to report for each container total blkio and network stats or not +# total = false +# + + +# # Read statistics from one or many dovecot servers +# [[inputs.dovecot]] +# ## specify dovecot servers via an address:port list +# ## e.g. 
+# ## localhost:24242 +# ## +# ## If no servers are specified, then localhost is used as the host. +# servers = ["localhost:24242"] +# ## Type is one of "user", "domain", "ip", or "global" +# type = "global" +# ## Wildcard matches like "*.com". An empty string "" is same as "*" +# ## If type = "ip" filters should be +# filters = [""] + + +# # Read stats from one or more Elasticsearch servers or clusters +# [[inputs.elasticsearch]] +# ## specify a list of one or more Elasticsearch servers +# # you can add username and password to your url to use basic authentication: +# # servers = ["http://user:pass@localhost:9200"] +# servers = ["http://localhost:9200"] +# +# ## Timeout for HTTP requests to the elastic search server(s) +# http_timeout = "5s" +# +# ## When local is true (the default), the node will read only its own stats. +# ## Set local to false when you want to read the node stats from all nodes +# ## of the cluster. +# local = true +# +# ## Set cluster_health to true when you want to also obtain cluster health stats +# cluster_health = false +# +# ## Set cluster_stats to true when you want to also obtain cluster stats from the +# ## Master node. +# cluster_stats = false +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from one or more commands that can output to stdout +# [[inputs.exec]] +# ## Commands array +# commands = [ +# "/tmp/test.sh", +# "/usr/bin/mycollector --foo=bar", +# "/tmp/collect_*.sh" +# ] +# +# ## Timeout for each command to complete. +# timeout = "5s" +# +# ## measurement name suffix (for separating different commands) +# name_suffix = "_mycollector" +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Read stats about given file(s) +# [[inputs.filestat]] +# ## Files to gather stats about. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## "/var/log/**.log" -> recursively find all .log files in /var/log +# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log +# ## "/var/log/apache.log" -> just tail the apache log file +# ## +# ## See https://github.com/gobwas/glob for more examples +# ## +# files = ["/var/log/**.log"] +# ## If true, read the entire file and calculate an md5 checksum. +# md5 = false + + +# # Read flattened metrics from one or more GrayLog HTTP endpoints +# [[inputs.graylog]] +# ## API endpoint, currently supported API: +# ## +# ## - multiple (Ex http://:12900/system/metrics/multiple) +# ## - namespace (Ex http://:12900/system/metrics/namespace/{namespace}) +# ## +# ## For namespace endpoint, the metrics array will be ignored for that call. +# ## Endpoint can contain namespace and multiple type calls. +# ## +# ## Please check http://[graylog-server-ip]:12900/api-browser for full list +# ## of endpoints +# servers = [ +# "http://[graylog-server-ip]:12900/system/metrics/multiple", +# ] +# +# ## Metrics list +# ## List of metrics can be found on Graylog webservice documentation. 
+# ## Or by hitting the the web service api at: +# ## http://[graylog-host]:12900/system/metrics +# metrics = [ +# "jvm.cl.loaded", +# "jvm.memory.pools.Metaspace.committed" +# ] +# +# ## Username and password +# username = "" +# password = "" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics of haproxy, via socket or csv stats page +# [[inputs.haproxy]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.10.3.33:1936, etc. +# ## Make sure you specify the complete path to the stats endpoint +# ## including the protocol, ie http://10.10.3.33:1936/haproxy?stats +# # +# ## If no servers are specified, then default to 127.0.0.1:1936/haproxy?stats +# servers = ["http://myhaproxy.com:1936/haproxy?stats"] +# ## +# ## You can also use local socket with standard wildcard globbing. +# ## Server address not starting with 'http' will be treated as a possible +# ## socket, so both examples below are valid. +# ## servers = ["socket:/run/haproxy/admin.sock", "/run/haproxy/*.sock"] + + +# # HTTP/HTTPS request given an address a method and a timeout +# [[inputs.http_response]] +# ## Server address (default http://localhost) +# address = "http://github.com" +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# ## HTTP Request Method +# method = "GET" +# ## Whether to follow redirects from the server (defaults to false) +# follow_redirects = true +# ## HTTP Request Headers (all values must be strings) +# # [inputs.http_response.headers] +# # Host = "github.com" +# ## Optional HTTP Request Body +# # body = ''' +# # {'fake':'data'} +# # ''' +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read flattened metrics from one or more JSON HTTP endpoints +# [[inputs.httpjson]] +# ## NOTE This plugin only reads numerical measurements, strings and booleans +# ## will be ignored. +# +# ## a name for the service being polled +# name = "webserver_stats" +# +# ## URL of each server in the service's cluster +# servers = [ +# "http://localhost:9999/stats/", +# "http://localhost:9998/stats/", +# ] +# ## Set response_timeout (default 5 seconds) +# response_timeout = "5s" +# +# ## HTTP method to use: GET or POST (case-sensitive) +# method = "GET" +# +# ## List of tag names to extract from top-level of JSON server response +# # tag_keys = [ +# # "my_tag_1", +# # "my_tag_2" +# # ] +# +# ## HTTP parameters (all values must be strings) +# [inputs.httpjson.parameters] +# event_type = "cpu_spike" +# threshold = "0.75" +# +# ## HTTP Header parameters (all values must be strings) +# # [inputs.httpjson.headers] +# # X-Auth-Token = "my-xauth-token" +# # apiVersion = "v1" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read InfluxDB-formatted JSON metrics from one or more HTTP endpoints +# [[inputs.influxdb]] +# ## Works with InfluxDB debug endpoints out of the box, +# ## but other services can use this format too. +# ## See the influxdb plugin's README for more details. 
+# +# ## Multiple URLs from which to read InfluxDB-formatted JSON +# ## Default is "http://localhost:8086/debug/vars". +# urls = [ +# "http://localhost:8086/debug/vars" +# ] +# +# ## http request & header timeout +# timeout = "5s" + + +# # Collect statistics about itself +# [[inputs.internal]] +# ## If true, collect telegraf memory stats. +# # collect_memstats = true + + +# # Read metrics from one or many bare metal servers +# [[inputs.ipmi_sensor]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]] +# ## e.g. +# ## root:passwd@lan(127.0.0.1) +# ## +# servers = ["USERID:PASSW0RD@lan(192.168.1.1)"] + + +# # Read JMX metrics through Jolokia +# [[inputs.jolokia]] +# ## This is the context root used to compose the jolokia url +# ## NOTE that Jolokia requires a trailing slash at the end of the context root +# ## NOTE that your jolokia security policy must allow for POST requests. +# context = "/jolokia/" +# +# ## This specifies the mode used +# # mode = "proxy" +# # +# ## When in proxy mode this section is used to specify further +# ## proxy address configurations. +# ## Remember to change host address to fit your environment. +# # [inputs.jolokia.proxy] +# # host = "127.0.0.1" +# # port = "8080" +# +# ## Optional http timeouts +# ## +# ## response_header_timeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # response_header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## List of servers exposing jolokia read service +# [[inputs.jolokia.servers]] +# name = "as-server-01" +# host = "127.0.0.1" +# port = "8080" +# # username = "myuser" +# # password = "mypassword" +# +# ## List of metrics collected on above servers +# ## Each metric consists in a name, a jmx path and either +# ## a pass or drop slice attribute. +# ## This collect all heap memory usage metrics. +# [[inputs.jolokia.metrics]] +# name = "heap_memory_usage" +# mbean = "java.lang:type=Memory" +# attribute = "HeapMemoryUsage" +# +# ## This collect thread counts metrics. +# [[inputs.jolokia.metrics]] +# name = "thread_count" +# mbean = "java.lang:type=Threading" +# attribute = "TotalStartedThreadCount,ThreadCount,DaemonThreadCount,PeakThreadCount" +# +# ## This collect number of class loaded/unloaded counts metrics. +# [[inputs.jolokia.metrics]] +# name = "class_count" +# mbean = "java.lang:type=ClassLoading" +# attribute = "LoadedClassCount,UnloadedClassCount,TotalLoadedClassCount" + + +# # Read metrics from the kubernetes kubelet api +# [[inputs.kubernetes]] +# ## URL for the kubelet +# url = "http://1.1.1.1:10255" +# +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token +# +# ## Optional SSL Config +# # ssl_ca = /path/to/cafile +# # ssl_cert = /path/to/certfile +# # ssl_key = /path/to/keyfile +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Read metrics from a LeoFS Server via SNMP +# [[inputs.leofs]] +# ## An array of URI to gather stats about LeoFS. +# ## Specify an ip or hostname with port. 
ie 127.0.0.1:4020 +# servers = ["127.0.0.1:4021"] + + +# # Read metrics from local Lustre service on OST, MDS +# [[inputs.lustre2]] +# ## An array of /proc globs to search for Lustre stats +# ## If not specified, the default will work on Lustre 2.5.x +# ## +# # ost_procfiles = [ +# # "/proc/fs/lustre/obdfilter/*/stats", +# # "/proc/fs/lustre/osd-ldiskfs/*/stats", +# # "/proc/fs/lustre/obdfilter/*/job_stats", +# # ] +# # mds_procfiles = [ +# # "/proc/fs/lustre/mdt/*/md_stats", +# # "/proc/fs/lustre/mdt/*/job_stats", +# # ] + + +# # Gathers metrics from the /3.0/reports MailChimp API +# [[inputs.mailchimp]] +# ## MailChimp API key +# ## get from https://admin.mailchimp.com/account/api/ +# api_key = "" # required +# ## Reports for campaigns sent more than days_old ago will not be collected. +# ## 0 means collect all. +# days_old = 0 +# ## Campaign ID to get, if empty gets all campaigns, this option overrides days_old +# # campaign_id = "" + + +# # Read metrics from one or many memcached servers +# [[inputs.memcached]] +# ## An array of address to gather stats about. Specify an ip on hostname +# ## with optional port. ie localhost, 10.0.0.1:11211, etc. +# servers = ["localhost:11211"] +# # unix_sockets = ["/var/run/memcached.sock"] + + +# # Telegraf plugin for gathering metrics from N Mesos masters +# [[inputs.mesos]] +# ## Timeout, in ms. +# timeout = 100 +# ## A list of Mesos masters. +# masters = ["localhost:5050"] +# ## Master metrics groups to be collected, by default, all enabled. +# master_collections = [ +# "resources", +# "master", +# "system", +# "agents", +# "frameworks", +# "tasks", +# "messages", +# "evqueue", +# "registrar", +# ] +# ## A list of Mesos slaves, default is [] +# # slaves = [] +# ## Slave metrics groups to be collected, by default, all enabled. +# # slave_collections = [ +# # "resources", +# # "agent", +# # "system", +# # "executors", +# # "tasks", +# # "messages", +# # ] + + +# # Read metrics from one or many MongoDB servers +# [[inputs.mongodb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## mongodb://user:auth_key@10.10.3.30:27017, +# ## mongodb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. +# servers = ["127.0.0.1:27017"] +# gather_perdb_stats = false + + +# # Read metrics from one or many mysql servers +# [[inputs.mysql]] +# ## specify servers via a url matching: +# ## [username[:password]@][protocol[(address)]]/[?tls=[true|false|skip-verify]] +# ## see https://github.com/go-sql-driver/mysql#dsn-data-source-name +# ## e.g. +# ## servers = ["user:passwd@tcp(127.0.0.1:3306)/?tls=false"] +# ## servers = ["user@tcp(127.0.0.1:3306)/?tls=false"] +# # +# ## If no servers are specified, then localhost is used as the host. 
+# servers = ["tcp(127.0.0.1:3306)/"] +# ## the limits for metrics form perf_events_statements +# perf_events_statements_digest_text_limit = 120 +# perf_events_statements_limit = 250 +# perf_events_statements_time_limit = 86400 +# # +# ## if the list is empty, then metrics are gathered from all databasee tables +# table_schema_databases = [] +# # +# ## gather metrics from INFORMATION_SCHEMA.TABLES for databases provided above list +# gather_table_schema = false +# # +# ## gather thread state counts from INFORMATION_SCHEMA.PROCESSLIST +# gather_process_list = true +# # +# ## gather auto_increment columns and max values from information schema +# gather_info_schema_auto_inc = true +# # +# ## gather metrics from SHOW SLAVE STATUS command output +# gather_slave_status = true +# # +# ## gather metrics from SHOW BINARY LOGS command output +# gather_binary_logs = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_TABLE +# gather_table_io_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_LOCK_WAITS +# gather_table_lock_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.TABLE_IO_WAITS_SUMMARY_BY_INDEX_USAGE +# gather_index_io_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENT_WAITS +# gather_event_waits = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.FILE_SUMMARY_BY_EVENT_NAME +# gather_file_events_stats = false +# # +# ## gather metrics from PERFORMANCE_SCHEMA.EVENTS_STATEMENTS_SUMMARY_BY_DIGEST +# gather_perf_events_statements = false +# # +# ## Some queries we may want to run less often (such as SHOW GLOBAL VARIABLES) +# interval_slow = "30m" + + +# # Read metrics about network interface usage +# [[inputs.net]] +# ## By default, telegraf gathers stats from any up interface (excluding loopback) +# ## Setting interfaces will tell it to gather these explicit interfaces, +# ## regardless of status. +# ## +# # interfaces = ["eth0"] + + +# # TCP or UDP 'ping' given url and collect response time in seconds +# [[inputs.net_response]] +# ## Protocol, must be "tcp" or "udp" +# ## NOTE: because the "udp" protocol does not respond to requests, it requires +# ## a send/expect string pair (see below). +# protocol = "tcp" +# ## Server address (default localhost) +# address = "localhost:80" +# ## Set timeout +# timeout = "1s" +# +# ## Set read timeout (only used if expecting a response) +# read_timeout = "1s" +# +# ## The following options are required for UDP checks. For TCP, they are +# ## optional. The plugin will send the given string to the server and then +# ## expect to receive the given 'expect' string back. +# ## string sent to the server +# # send = "ssh" +# ## expected string in answer +# # expect = "ssh" + + +# # Read TCP metrics such as established, time wait and sockets counts. +# [[inputs.netstat]] +# # no configuration + + +# # Read Nginx's basic status information (ngx_http_stub_status_module) +# [[inputs.nginx]] +# ## An array of Nginx stub_status URI to gather stats. +# urls = ["http://localhost/status"] + + +# # Read NSQ topic and channel statistics. +# [[inputs.nsq]] +# ## An array of NSQD HTTP API endpoints +# endpoints = ["http://localhost:4151"] + + +# # Collect kernel snmp counters and network interface statistics +# [[inputs.nstat]] +# ## file paths for proc files. If empty default paths will be used: +# ## /proc/net/netstat, /proc/net/snmp, /proc/net/snmp6 +# ## These can also be overridden with env variables, see README. 
+# proc_net_netstat = "/proc/net/netstat" +# proc_net_snmp = "/proc/net/snmp" +# proc_net_snmp6 = "/proc/net/snmp6" +# ## dump metrics with 0 values too +# dump_zeros = true + + +# # Get standard NTP query metrics, requires ntpq executable. +# [[inputs.ntpq]] +# ## If false, set the -n ntpq flag. Can reduce metric gather time. +# dns_lookup = true + + +# # Read metrics of passenger using passenger-status +# [[inputs.passenger]] +# ## Path of passenger-status. +# ## +# ## Plugin gather metric via parsing XML output of passenger-status +# ## More information about the tool: +# ## https://www.phusionpassenger.com/library/admin/apache/overall_status_report.html +# ## +# ## If no path is specified, then the plugin simply execute passenger-status +# ## hopefully it can be found in your PATH +# command = "passenger-status -v --show=xml" + + +# # Read metrics of phpfpm, via HTTP status page or socket +# [[inputs.phpfpm]] +# ## An array of addresses to gather stats about. Specify an ip or hostname +# ## with optional port and path +# ## +# ## Plugin can be configured in three modes (either can be used): +# ## - http: the URL must start with http:// or https://, ie: +# ## "http://localhost/status" +# ## "http://192.168.130.1/status?full" +# ## +# ## - unixsocket: path to fpm socket, ie: +# ## "/var/run/php5-fpm.sock" +# ## or using a custom fpm status path: +# ## "/var/run/php5-fpm.sock:fpm-custom-status-path" +# ## +# ## - fcgi: the URL must start with fcgi:// or cgi://, and port must be present, ie: +# ## "fcgi://10.0.0.12:9000/status" +# ## "cgi://10.0.10.12:9001/status" +# ## +# ## Example of multiple gathering from local socket and remove host +# ## urls = ["http://192.168.1.20/status", "/tmp/fpm.sock"] +# urls = ["http://localhost/status"] + + +# # Ping given url(s) and return statistics +# [[inputs.ping]] +# ## NOTE: this plugin forks the ping command. You may need to set capabilities +# ## via setcap cap_net_raw+p /bin/ping +# # +# ## urls to ping +# urls = ["www.google.com"] # required +# ## number of pings to send per collection (ping -c ) +# # count = 1 +# ## interval, in s, at which to ping. 0 == default (ping -i ) +# # ping_interval = 1.0 +# ## per-ping timeout, in s. 0 == no timeout (ping -W ) +# # timeout = 1.0 +# ## interface to send ping from (ping -I ) +# # interface = "" + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# ## +# ## All connection parameters are optional. +# ## +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# ## +# address = "host=localhost user=postgres sslmode=disable" +# +# ## A list of databases to explicitly ignore. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'databases' option. +# # ignored_databases = ["postgres", "template0", "template1"] +# +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. Do NOT use with the 'ignore_databases' option. 
+# # databases = ["app_production", "testing"] + + +# # Read metrics from one or many postgresql servers +# [[inputs.postgresql_extensible]] +# ## specify address via a url matching: +# ## postgres://[pqgotest[:password]]@localhost[/dbname]\ +# ## ?sslmode=[disable|verify-ca|verify-full] +# ## or a simple string: +# ## host=localhost user=pqotest password=... sslmode=... dbname=app_production +# # +# ## All connection parameters are optional. # +# ## Without the dbname parameter, the driver will default to a database +# ## with the same name as the user. This dbname is just for instantiating a +# ## connection with the server and doesn't restrict the databases we are trying +# ## to grab metrics for. +# # +# address = "host=localhost user=postgres sslmode=disable" +# ## A list of databases to pull metrics about. If not specified, metrics for all +# ## databases are gathered. +# ## databases = ["app_production", "testing"] +# # +# # outputaddress = "db01" +# ## A custom name for the database that will be used as the "server" tag in the +# ## measurement output. If not specified, a default one generated from +# ## the connection address is used. +# # +# ## Define the toml config where the sql queries are stored +# ## New queries can be added, if the withdbname is set to true and there is no +# ## databases defined in the 'databases field', the sql query is ended by a +# ## 'is not null' in order to make the query succeed. +# ## Example : +# ## The sqlquery : "SELECT * FROM pg_stat_database where datname" become +# ## "SELECT * FROM pg_stat_database where datname IN ('postgres', 'pgbench')" +# ## because the databases variable was set to ['postgres', 'pgbench' ] and the +# ## withdbname was true. Be careful that if the withdbname is set to false you +# ## don't have to define the where clause (aka with the dbname) the tagvalue +# ## field is used to define custom tags (separated by commas) +# ## The optional "measurement" value can be used to override the default +# ## output measurement name ("postgresql"). +# # +# ## Structure : +# ## [[inputs.postgresql_extensible.query]] +# ## sqlquery string +# ## version string +# ## withdbname boolean +# ## tagvalue string (comma separated) +# ## measurement string +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_database" +# version=901 +# withdbname=false +# tagvalue="" +# measurement="" +# [[inputs.postgresql_extensible.query]] +# sqlquery="SELECT * FROM pg_stat_bgwriter" +# version=901 +# withdbname=false +# tagvalue="postgresql.stats" + + +# # Read metrics from one or many PowerDNS servers +# [[inputs.powerdns]] +# ## An array of sockets to gather stats about. +# ## Specify a path to unix socket. 
+# unix_sockets = ["/var/run/pdns.controlsocket"] + + +# # Monitor process cpu and memory usage +# [[inputs.procstat]] +# ## Must specify one of: pid_file, exe, or pattern +# ## PID file to monitor process +# pid_file = "/var/run/nginx.pid" +# ## executable name (ie, pgrep ) +# # exe = "nginx" +# ## pattern as argument for pgrep (ie, pgrep -f ) +# # pattern = "nginx" +# ## user as argument for pgrep (ie, pgrep -u ) +# # user = "nginx" +# +# ## override for process_name +# ## This is optional; default is sourced from /proc//status +# # process_name = "bar" +# ## Field name prefix +# prefix = "" +# ## comment this out if you want raw cpu_time stats +# fielddrop = ["cpu_time_*"] +# ## This is optional; moves pid into a tag instead of a field +# pid_tag = false + + +# # Read metrics from one or many prometheus clients +# [[inputs.prometheus]] +# ## An array of urls to scrape metrics from. +# urls = ["http://localhost:9100/metrics"] +# +# ## Use bearer token for authorization +# # bearer_token = /path/to/bearer/token +# +# ## Specify timeout duration for slower prometheus clients (default is 3s) +# # response_timeout = "3s" +# +# ## Optional SSL Config +# # ssl_ca = /path/to/cafile +# # ssl_cert = /path/to/certfile +# # ssl_key = /path/to/keyfile +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false + + +# # Reads last_run_summary.yaml file and converts to measurments +# [[inputs.puppetagent]] +# ## Location of puppet last run summary file +# location = "/var/lib/puppet/state/last_run_summary.yaml" + + +# # Read metrics from one or many RabbitMQ servers via the management API +# [[inputs.rabbitmq]] +# # url = "http://localhost:15672" +# # name = "rmq-server-1" # optional tag +# # username = "guest" +# # password = "guest" +# +# ## Optional SSL Config +# # ssl_ca = "/etc/telegraf/ca.pem" +# # ssl_cert = "/etc/telegraf/cert.pem" +# # ssl_key = "/etc/telegraf/key.pem" +# ## Use SSL but skip chain & host verification +# # insecure_skip_verify = false +# +# ## Optional request timeouts +# ## +# ## ResponseHeaderTimeout, if non-zero, specifies the amount of time to wait +# ## for a server's response headers after fully writing the request. +# # header_timeout = "3s" +# ## +# ## client_timeout specifies a time limit for requests made by this client. +# ## Includes connection time, any redirects, and reading the response body. +# # client_timeout = "4s" +# +# ## A list of nodes to pull metrics about. If not specified, metrics for +# ## all nodes are gathered. +# # nodes = ["rabbit@node1", "rabbit@node2"] + + +# # Read raindrops stats (raindrops - real-time stats for preforking Rack servers) +# [[inputs.raindrops]] +# ## An array of raindrops middleware URI to gather stats. +# urls = ["http://localhost:8080/_raindrops"] + + +# # Read metrics from one or many redis servers +# [[inputs.redis]] +# ## specify servers via a url matching: +# ## [protocol://][:password]@address[:port] +# ## e.g. +# ## tcp://localhost:6379 +# ## tcp://:password@192.168.99.100 +# ## unix:///var/run/redis.sock +# ## +# ## If no servers are specified, then localhost is used as the host. +# ## If no port is specified, 6379 is used +# servers = ["tcp://localhost:6379"] + + +# # Read metrics from one or many RethinkDB servers +# [[inputs.rethinkdb]] +# ## An array of URI to gather stats about. Specify an ip or hostname +# ## with optional port add password. ie, +# ## rethinkdb://user:auth_key@10.10.3.30:28105, +# ## rethinkdb://10.10.3.33:18832, +# ## 10.0.0.1:10000, etc. 
+# servers = ["127.0.0.1:28015"] + + +# # Read metrics one or many Riak servers +# [[inputs.riak]] +# # Specify a list of one or more riak http servers +# servers = ["http://localhost:8098"] + + +# # Retrieves SNMP values from remote agents +# [[inputs.snmp]] +# agents = [ "127.0.0.1:161" ] +# ## Timeout for each SNMP query. +# timeout = "5s" +# ## Number of retries to attempt within timeout. +# retries = 3 +# ## SNMP version, values can be 1, 2, or 3 +# version = 2 +# +# ## SNMP community string. +# community = "public" +# +# ## The GETBULK max-repetitions parameter +# max_repetitions = 10 +# +# ## SNMPv3 auth parameters +# #sec_name = "myuser" +# #auth_protocol = "md5" # Values: "MD5", "SHA", "" +# #auth_password = "pass" +# #sec_level = "authNoPriv" # Values: "noAuthNoPriv", "authNoPriv", "authPriv" +# #context_name = "" +# #priv_protocol = "" # Values: "DES", "AES", "" +# #priv_password = "" +# +# ## measurement name +# name = "system" +# [[inputs.snmp.field]] +# name = "hostname" +# oid = ".1.0.0.1.1" +# [[inputs.snmp.field]] +# name = "uptime" +# oid = ".1.0.0.1.2" +# [[inputs.snmp.field]] +# name = "load" +# oid = ".1.0.0.1.3" +# [[inputs.snmp.field]] +# oid = "HOST-RESOURCES-MIB::hrMemorySize" +# +# [[inputs.snmp.table]] +# ## measurement name +# name = "remote_servers" +# inherit_tags = [ "hostname" ] +# [[inputs.snmp.table.field]] +# name = "server" +# oid = ".1.0.0.0.1.0" +# is_tag = true +# [[inputs.snmp.table.field]] +# name = "connections" +# oid = ".1.0.0.0.1.1" +# [[inputs.snmp.table.field]] +# name = "latency" +# oid = ".1.0.0.0.1.2" +# +# [[inputs.snmp.table]] +# ## auto populate table's fields using the MIB +# oid = "HOST-RESOURCES-MIB::hrNetworkTable" + + +# # DEPRECATED! PLEASE USE inputs.snmp INSTEAD. +# [[inputs.snmp_legacy]] +# ## Use 'oids.txt' file to translate oids to names +# ## To generate 'oids.txt' you need to run: +# ## snmptranslate -m all -Tz -On | sed -e 's/"//g' > /tmp/oids.txt +# ## Or if you have an other MIB folder with custom MIBs +# ## snmptranslate -M /mycustommibfolder -Tz -On -m all | sed -e 's/"//g' > oids.txt +# snmptranslate_file = "/tmp/oids.txt" +# [[inputs.snmp.host]] +# address = "192.168.2.2:161" +# # SNMP community +# community = "public" # default public +# # SNMP version (1, 2 or 3) +# # Version 3 not supported yet +# version = 2 # default 2 +# # SNMP response timeout +# timeout = 2.0 # default 2.0 +# # SNMP request retries +# retries = 2 # default 2 +# # Which get/bulk do you want to collect for this host +# collect = ["mybulk", "sysservices", "sysdescr"] +# # Simple list of OIDs to get, in addition to "collect" +# get_oids = [] +# +# [[inputs.snmp.host]] +# address = "192.168.2.3:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# collect = ["mybulk"] +# get_oids = [ +# "ifNumber", +# ".1.3.6.1.2.1.1.3.0", +# ] +# +# [[inputs.snmp.get]] +# name = "ifnumber" +# oid = "ifNumber" +# +# [[inputs.snmp.get]] +# name = "interface_speed" +# oid = "ifSpeed" +# instance = "0" +# +# [[inputs.snmp.get]] +# name = "sysuptime" +# oid = ".1.3.6.1.2.1.1.3.0" +# unit = "second" +# +# [[inputs.snmp.bulk]] +# name = "mybulk" +# max_repetition = 127 +# oid = ".1.3.6.1.2.1.1" +# +# [[inputs.snmp.bulk]] +# name = "ifoutoctets" +# max_repetition = 127 +# oid = "ifOutOctets" +# +# [[inputs.snmp.host]] +# address = "192.168.2.13:161" +# #address = "127.0.0.1:161" +# community = "public" +# version = 2 +# timeout = 2.0 +# retries = 2 +# #collect = ["mybulk", "sysservices", "sysdescr", "systype"] +# collect = ["sysuptime" ] +# 
[[inputs.snmp.host.table]] +# name = "iftable3" +# include_instances = ["enp5s0", "eth1"] +# +# # SNMP TABLEs +# # table without mapping neither subtables +# [[inputs.snmp.table]] +# name = "iftable1" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# +# # table without mapping but with subtables +# [[inputs.snmp.table]] +# name = "iftable2" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# sub_tables = [".1.3.6.1.2.1.2.2.1.13"] +# +# # table with mapping but without subtables +# [[inputs.snmp.table]] +# name = "iftable3" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty. get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty, get all subtables +# +# # table with both mapping and subtables +# [[inputs.snmp.table]] +# name = "iftable4" +# oid = ".1.3.6.1.2.1.31.1.1.1" +# # if empty get all instances +# mapping_table = ".1.3.6.1.2.1.31.1.1.1.1" +# # if empty get all subtables +# # sub_tables could be not "real subtables" +# sub_tables=[".1.3.6.1.2.1.2.2.1.13", "bytes_recv", "bytes_send"] + + +# # Read metrics from Microsoft SQL Server +# [[inputs.sqlserver]] +# ## Specify instances to monitor with a list of connection strings. +# ## All connection parameters are optional. +# ## By default, the host is localhost, listening on default port, TCP 1433. +# ## for Windows, the user is the currently running AD user (SSO). +# ## See https://github.com/denisenkom/go-mssqldb for detailed connection +# ## parameters. +# # servers = [ +# # "Server=192.168.1.10;Port=1433;User Id=;Password=;app name=telegraf;log=1;", +# # ] + + +# # Inserts sine and cosine waves for demonstration purposes +# [[inputs.trig]] +# ## Set the amplitude +# amplitude = 10.0 + + +# # Read Twemproxy stats data +# [[inputs.twemproxy]] +# ## Twemproxy stats address and port (no scheme) +# addr = "localhost:22222" +# ## Monitor pool name +# pools = ["redis_pool", "mc_pool"] + + +# # A plugin to collect stats from Varnish HTTP Cache +# [[inputs.varnish]] +# ## The default location of the varnishstat binary can be overridden with: +# binary = "/usr/bin/varnishstat" +# +# ## By default, telegraf gather stats for 3 metric points. +# ## Setting stats will override the defaults shown below. +# ## Glob matching can be used, ie, stats = ["MAIN.*"] +# ## stats may also be set to ["*"], which will collect all stats +# stats = ["MAIN.cache_hit", "MAIN.cache_miss", "MAIN.uptime"] + + +# # Read metrics of ZFS from arcstats, zfetchstats, vdev_cache_stats, and pools +# [[inputs.zfs]] +# ## ZFS kstat path. Ignored on FreeBSD +# ## If not specified, then default is: +# # kstatPath = "/proc/spl/kstat/zfs" +# +# ## By default, telegraf gather all zfs stats +# ## If not specified, then default is: +# # kstatMetrics = ["arcstats", "zfetchstats", "vdev_cache_stats"] +# +# ## By default, don't gather zpool stats +# # poolMetrics = false + + +# # Reads 'mntr' stats from one or many zookeeper servers +# [[inputs.zookeeper]] +# ## An array of address to gather stats about. Specify an ip or hostname +# ## with port. ie localhost:2181, 10.0.0.1:2181, etc. +# +# ## If no servers are specified, then localhost is used as the host. 
+# ## If no port is specified, 2181 is used +# servers = [":2181"] + + + +############################################################################### +# SERVICE INPUT PLUGINS # +############################################################################### + +# # Influx HTTP write listener +# [[inputs.http_listener]] +# ## Address and port to host HTTP listener on +# service_address = ":8186" +# +# ## maximum duration before timing out read of the request +# read_timeout = "10s" +# ## maximum duration before timing out write of the response +# write_timeout = "10s" +# +# ## Maximum allowed http request body size in bytes. +# ## 0 means to use the default of 536,870,912 bytes (500 mebibytes) +# max_body_size = 0 +# +# ## Maximum line size allowed to be sent in bytes. +# ## 0 means to use the default of 65536 bytes (64 kibibytes) +# max_line_size = 0 + + +# # Read metrics from Kafka topic(s) +# [[inputs.kafka_consumer]] +# ## topic(s) to consume +# topics = ["telegraf"] +# ## an array of Zookeeper connection strings +# zookeeper_peers = ["localhost:2181"] +# ## Zookeeper Chroot +# zookeeper_chroot = "" +# ## the name of the consumer group +# consumer_group = "telegraf_metrics_consumers" +# ## Offset (must be either "oldest" or "newest") +# offset = "oldest" +# +# ## Data format to consume. +# ## Each data format has it's own unique set of configuration options, read +# ## more about them here: +# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md +# data_format = "influx" + + +# # Stream and parse log file(s). +# [[inputs.logparser]] +# ## Log files to parse. +# ## These accept standard unix glob matching rules, but with the addition of +# ## ** as a "super asterisk". ie: +# ## /var/log/**.log -> recursively find all .log files in /var/log +# ## /var/log/*/*.log -> find all .log files with a parent dir in /var/log +# ## /var/log/apache.log -> only tail the apache log file +# files = ["/var/log/apache/access.log"] +# ## Read file from beginning. +# from_beginning = false +# +# ## Parse logstash-style "grok" patterns: +# ## Telegraf built-in parsing patterns: https://goo.gl/dkay10 +# [inputs.logparser.grok] +# ## This is a list of patterns to check the given log file(s) for. +# ## Note that adding patterns here increases processing time. The most +# ## efficient configuration is to have one pattern per logparser. +# ## Other common built-in patterns are: +# ## %{COMMON_LOG_FORMAT} (plain apache & nginx access logs) +# ## %{COMBINED_LOG_FORMAT} (access logs + referrer & agent) +# patterns = ["%{COMBINED_LOG_FORMAT}"] +# ## Name of the outputted measurement name. +# measurement = "apache_access_log" +# ## Full path(s) to custom pattern files. +# custom_pattern_files = [] +# ## Custom patterns can also be defined here. Put one pattern per line. +# custom_patterns = ''' +# ''' + + +# # Read metrics from MQTT topic(s) +# [[inputs.mqtt_consumer]] +# servers = ["localhost:1883"] +# ## MQTT QoS, must be 0, 1, or 2 +# qos = 0 +# +# ## Topics to subscribe to +# topics = [ +# "telegraf/host01/cpu", +# "telegraf/+/mem", +# "sensors/#", +# ] +# +# # if true, messages that can't be delivered while the subscriber is offline +# # will be delivered when it comes back (such as on service restart). +# # NOTE: if true, client_id MUST be set +# persistent_session = false +# # If empty, a random client ID will be generated. +# client_id = "" +# +# ## username and password to connect MQTT server. 
+
+
+# # Read metrics from MQTT topic(s)
+# [[inputs.mqtt_consumer]]
+# servers = ["localhost:1883"]
+# ## MQTT QoS, must be 0, 1, or 2
+# qos = 0
+#
+# ## Topics to subscribe to
+# topics = [
+# "telegraf/host01/cpu",
+# "telegraf/+/mem",
+# "sensors/#",
+# ]
+#
+# # if true, messages that can't be delivered while the subscriber is offline
+# # will be delivered when it comes back (such as on service restart).
+# # NOTE: if true, client_id MUST be set
+# persistent_session = false
+# # If empty, a random client ID will be generated.
+# client_id = ""
+#
+# ## Username and password to connect to the MQTT server.
+# # username = "telegraf"
+# # password = "metricsmetricsmetricsmetrics"
+#
+# ## Optional SSL Config
+# # ssl_ca = "/etc/telegraf/ca.pem"
+# # ssl_cert = "/etc/telegraf/cert.pem"
+# # ssl_key = "/etc/telegraf/key.pem"
+# ## Use SSL but skip chain & host verification
+# # insecure_skip_verify = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read metrics from NATS subject(s)
+# [[inputs.nats_consumer]]
+# ## URLs of NATS servers
+# # servers = ["nats://localhost:4222"]
+# ## Use Transport Layer Security
+# # secure = false
+# ## subject(s) to consume
+# # subjects = ["telegraf"]
+# ## name a queue group
+# # queue_group = "telegraf_consumers"
+#
+# ## Sets the limits for pending msgs and bytes for each subscription
+# ## These shouldn't need to be adjusted except in very high throughput scenarios
+# # pending_message_limit = 65536
+# # pending_bytes_limit = 67108864
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Read NSQ topic for metrics.
+# [[inputs.nsq_consumer]]
+# ## A string representing the NSQD TCP endpoint
+# server = "localhost:4150"
+# topic = "telegraf"
+# channel = "consumer"
+# max_in_flight = 100
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# Statsd Server
+[[inputs.statsd]]
+  ## Address and port to host UDP listener on
+  service_address = ":8125"
+
+  ## The following configuration options control when telegraf clears its cache
+  ## of previous values. If set to false, then telegraf will only clear its
+  ## cache when the daemon is restarted.
+  ## Reset gauges every interval (default=true)
+  delete_gauges = true
+  ## Reset counters every interval (default=true)
+  delete_counters = true
+  ## Reset sets every interval (default=true)
+  delete_sets = true
+  ## Reset timings & histograms every interval (default=true)
+  delete_timings = true
+
+  ## Percentiles to calculate for timing & histogram stats
+  percentiles = [90]
+
+  ## Separator to use between elements of a statsd metric
+  metric_separator = "_"
+
+  ## Parses tags in the datadog statsd format
+  ## http://docs.datadoghq.com/guides/dogstatsd/
+  parse_data_dog_tags = false
+
+  ## Statsd data translation templates, more info can be read here:
+  ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md#graphite
+  # templates = [
+  #   "cpu.* measurement*"
+  # ]
+
+  ## Number of UDP messages allowed to queue up; once filled,
+  ## the statsd server will start dropping packets
+  allowed_pending_messages = 10000
+
+  ## Number of timing/histogram values to track per-measurement in the
+  ## calculation of percentiles. Raising this limit increases the accuracy
+  ## of percentiles but also increases the memory usage and cpu time.
+  percentile_limit = 1000
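+
+# The statsd listener above is the one input in this file enabled by default.
+# A quick smoke test (a sketch; assumes 8125/udp is published to the host,
+# e.g. docker run ... -p 8125:8125/udp, and the metric name is illustrative):
+#
+#   echo "deploys.test:1|c" | nc -u -w 1 127.0.0.1 8125
+#
+# With metric_separator = "_", the dots become underscores, so the counter
+# shows up as the measurement "deploys_test".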
+
+
+# # Stream a log file, like the tail -f command
+# [[inputs.tail]]
+# ## files to tail.
+# ## These accept standard unix glob matching rules, but with the addition of
+# ## ** as a "super asterisk", e.g.:
+# ## "/var/log/**.log" -> recursively find all .log files in /var/log
+# ## "/var/log/*/*.log" -> find all .log files with a parent dir in /var/log
+# ## "/var/log/apache.log" -> just tail the apache log file
+# ##
+# ## See https://github.com/gobwas/glob for more examples
+# ##
+# files = ["/var/mymetrics.out"]
+# ## Read file from beginning.
+# from_beginning = false
+# ## Whether file is a named pipe
+# pipe = false
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Generic TCP listener
+# [[inputs.tcp_listener]]
+# ## Address and port to host TCP listener on
+# # service_address = ":8094"
+#
+# ## Number of TCP messages allowed to queue up. Once filled, the
+# ## TCP listener will start dropping packets.
+# # allowed_pending_messages = 10000
+#
+# ## Maximum number of concurrent TCP connections to allow
+# # max_tcp_connections = 250
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # Generic UDP listener
+# [[inputs.udp_listener]]
+# ## Address and port to host UDP listener on
+# # service_address = ":8092"
+#
+# ## Number of UDP messages allowed to queue up. Once filled, the
+# ## UDP listener will start dropping packets.
+# # allowed_pending_messages = 10000
+#
+# ## Set the buffer size of the UDP connection outside of OS default (in bytes)
+# ## If set to 0, take OS default
+# udp_buffer_size = 16777216
+#
+# ## Data format to consume.
+# ## Each data format has its own unique set of configuration options; read
+# ## more about them here:
+# ## https://github.com/influxdata/telegraf/blob/master/docs/DATA_FORMATS_INPUT.md
+# data_format = "influx"
+
+
+# # A Webhooks Event collector
+# [[inputs.webhooks]]
+# ## Address and port to host Webhook listener on
+# service_address = ":1619"
+#
+# [inputs.webhooks.filestack]
+# path = "/filestack"
+#
+# [inputs.webhooks.github]
+# path = "/github"
+#
+# [inputs.webhooks.mandrill]
+# path = "/mandrill"
+#
+# [inputs.webhooks.rollbar]
+# path = "/rollbar"
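+
+# After editing this file, a one-shot collection pass is a cheap way to catch
+# syntax errors and see what the non-service inputs emit (a sketch; service
+# inputs such as statsd and webhooks are not gathered in test mode):
+#
+#   telegraf --test --config /etc/telegraf/telegraf.conf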