Skip to content

Ennorion/graphite-clickhouse

 
 

Repository files navigation

graphite-clickhouse

Graphite cluster backend with ClickHouse support

Work scheme

stack.png

Gray components are optional or alternative

TL;DR

Preconfigured docker-compose

Compatibility

Build

Requires Go 1.7+

# build binary
git clone https://github.com/lomik/graphite-clickhouse.git
cd graphite-clickhouse
make

Installation

  1. Setup Yandex ClickHouse and carbon-clickhouse
  2. Setup and configure graphite-clickhouse
  3. Add graphite-clickhouse host:port to graphite-web CLUSTER_SERVERS

Configuration

Create /etc/graphite-clickhouse/rollup.xml with the same content as for ClickHouse. Short sample:

<graphite_rollup>
        <default>
                <function>avg</function>
                <retention>
                        <age>0</age>
                        <precision>60</precision>
                </retention>
                <retention>
                        <age>2592000</age>
                        <precision>3600</precision>
                </retention>
        </default>
</graphite_rollup>

For complex ClickHouse queries you might need to increase the default max_query_size. To do that, add the following line to /etc/clickhouse-server/users.xml for the user you are using:

<!-- Default is 262144 -->
<max_query_size>10485760</max_query_size>

Create /etc/graphite-clickhouse/graphite-clickhouse.conf

[common]
listen = ":9090"
max-cpu = 1
# How frequently to call debug.FreeOSMemory() to return memory back to OS
# Setting it to zero disables this functionality
memory-return-interval = "0s"
# Limit number of results from find query. Zero = unlimited
max-metrics-in-find-answer = 0
# Daemon returns empty response if query matches any of regular expressions
# target-blacklist = ["^not_found.*"]

[clickhouse]
# You can add user/password (http://user:password@localhost:8123) and any clickhouse options (GET-parameters) to url
# It is recommended to create a read-only user
url = "http://localhost:8123"
# Add extra prefix (directory in graphite) for all metrics
extra-prefix = ""

# Default table with points
data-table = "graphite"
data-timeout = "1m0s"
# Rollup rules xml filename. Use `auto` magic word for select rollup rules from ClickHouse
rollup-conf = "/etc/graphite-clickhouse/rollup.xml"

# Table with series list (daily and full)
# https://github.com/lomik/graphite-clickhouse/wiki/IndexTable
index-table = "graphite_index"
# Use daily data from the index table. This is useful for installations with a large number of short-lived series, but can be slower in other cases
index-use-daily = true
index-timeout = "1m"

# `tagged` table from carbon-clickhouse. Required for seriesByTag
tagged-table = ""

# Old index tables. Deprecated
tree-table = "graphite_tree"
# Optional table with daily series list.
# Useful for installations with a large number of short-lived series
date-tree-table = ""
# Supported several schemas of date-tree-table:
# 1 (default): table only with Path, Date, Level fields. Described here: https://habrahabr.ru/company/avito/blog/343928/
# 2: table with Path, Date, Level, Deleted, Version fields. Table type "series" in the carbon-clickhouse
# 3: same as #2 but with reversed Path. Table type "series-reverse" in the carbon-clickhouse
date-tree-table-version = 0
tree-timeout = "1m0s"

[prometheus]
# The URL under which Prometheus is externally reachable (for example, if Prometheus is served via a reverse proxy). Used for
# generating relative and absolute links back to Prometheus itself. If the URL has a path portion, it will be used to prefix all
# HTTP endpoints served by Prometheus. If omitted, relevant URL components will be derived automatically.
external-url = ""
page-title = "Prometheus Time Series Collection and Processing Server"

[carbonlink]
server = ""
threads-per-request = 10
connect-timeout = "50ms"
query-timeout = "50ms"
total-timeout = "500ms"

# You can define multiple data tables (with points).
# The first table that matches is used.
#
# # Sample, archived table with points older 30d
# [[data-table]]
# table = "graphite_archive"
# min-age = "720h"
# 
# # All available options
# [[data-table]]
# # clickhouse table name
# table = "table_name"
# # points in table are stored with reverse path
# reverse = false
# # Custom rollup.xml for table. 
# # Magic word `auto` can be used for load rules from ClickHouse
# # With value `none` only rollup-default-precision and rollup-default-function will be used for rollup
# rollup-conf = ""
# # Which table to discover rollup-rules from. If not specified - will use what specified in "table" parameter.
# # Useful when reading from distributed table, but the rollup parameters are on the shard tables.
# # Can be in "database.table" form.
# rollup-auto-table = ""
# # Sets the default precision and function for rollup patterns which don't have age=0 retention defined.
# # If age=0 retention is defined in the rollup config then it takes precedence.
# # If left at the default value of 0 then no rollup is performed when the requested interval 
# # is not covered by any rollup rule. In this case the points will be served with 60 second precision.
# rollup-default-precision = 60
# rollup-default-function = "avg"
# # from >= now - {max-age}
# max-age = "240h"
# # until <= now - {min-age}
# min-age = "240h"
# # until - from <= {max-interval}
# max-interval = "24h"
# # until - from >= {min-interval}
# min-interval = "24h"
# # regexp.Match({target-match-any}, target[0]) || regexp.Match({target-match-any}, target[1]) || ...
# target-match-any = "regexp"
# # regexp.Match({target-match-all}, target[0]) && regexp.Match({target-match-all}, target[1]) && ...
# target-match-all = "regexp"

[[logging]]
logger = ""
file = "/var/log/graphite-clickhouse/graphite-clickhouse.log"
level = "info"
encoding = "mixed"
encoding-time = "iso8601"
encoding-duration = "seconds"

Run on same host with old graphite-web 0.9.x

By default, graphite-web won't connect to a CLUSTER_SERVERS entry on localhost. Workaround:

class ForceLocal(str):
    def split(self, *args, **kwargs):
        return ["8.8.8.8", "8080"]

CLUSTER_SERVERS = [ForceLocal("127.0.0.1:9090")]

About

Graphite cluster backend with ClickHouse support

Resources

License

Stars

Watchers

Forks

Packages

No packages published

Languages

  • Go 97.2%
  • Shell 1.5%
  • Makefile 1.1%
  • Dockerfile 0.2%