Commit: signoz dockercompose with CMS container
Showing 69 changed files with 6,014 additions and 0 deletions.
@@ -0,0 +1,82 @@
# Deploy

Check that you have cloned [signoz/signoz](https://github.com/signoz/signoz)
and are currently in the `signoz/deploy` folder.

## Docker

If you don't have Docker set up, please follow [this guide](https://docs.docker.com/engine/install/)
to set up Docker before proceeding with the next steps.

### Using Install Script

Now run the following command to install:

```sh
./install.sh
```

### Using Docker Compose

If you don't have Docker Compose set up, please follow [this guide](https://docs.docker.com/compose/install/)
to set up Docker Compose before proceeding with the next steps.

For x86 (amd64) chips:

```sh
docker-compose -f docker/clickhouse-setup/docker-compose.yaml up -d
```

Open http://localhost:3301 in your favourite browser. In a couple of minutes, you should see
the data generated from HotROD in the SigNoz UI.
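
If the UI does not come up, a quick sanity check is to list the services started
from the same compose file and confirm they are all running:

```sh
# List the services defined in the compose file and their current status;
# they should all report a running/healthy state before you open the UI.
docker-compose -f docker/clickhouse-setup/docker-compose.yaml ps
```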

## Kubernetes

### Using Helm

#### Bring up SigNoz cluster

```sh
helm repo add signoz https://charts.signoz.io

kubectl create ns platform

helm -n platform install my-release signoz/signoz
```
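
It can take a little while for all pods to become ready; a minimal check before
port-forwarding:

```sh
# Watch the SigNoz pods in the platform namespace until they are all Running/Ready.
kubectl -n platform get pods -w
```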

To access the UI, you can `port-forward` the frontend service:

```sh
kubectl -n platform port-forward svc/my-release-frontend 3301:3301
```

Open http://localhost:3301 in your favourite browser. A few minutes after you generate load
from the HotROD application, you should see the data generated from HotROD in the SigNoz UI.

#### Test HotROD application with SigNoz

```sh
kubectl create ns sample-application

kubectl -n sample-application apply -f https://raw.githubusercontent.com/SigNoz/signoz/develop/sample-apps/hotrod/hotrod.yaml
```
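
Before generating load, confirm the HotROD and Locust pods are up (a quick sanity
check, not part of the original steps):

```sh
# The hotrod and locust-master pods should be Running before you start the load test.
kubectl -n sample-application get pods
```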

To generate load:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl -X POST -F \
  'user_count=6' -F 'spawn_rate=2' http://locust-master:8089/swarm
```

To stop load:

```sh
kubectl -n sample-application run strzal --image=djbingham/curl \
  --restart='OnFailure' -i --tty --rm --command -- curl \
  http://locust-master:8089/stop
```

## Uninstall / Troubleshoot

See our official documentation site [signoz.io/docs](https://signoz.io/docs) for more.
@@ -0,0 +1,35 @@
global:
  resolve_timeout: 1m
  slack_api_url: 'https://hooks.slack.com/services/xxx'

route:
  receiver: 'slack-notifications'

receivers:
- name: 'slack-notifications'
  slack_configs:
  - channel: '#alerts'
    send_resolved: true
    icon_url: https://avatars3.githubusercontent.com/u/3380462
    title: |-
      [{{ .Status | toUpper }}{{ if eq .Status "firing" }}:{{ .Alerts.Firing | len }}{{ end }}] {{ .CommonLabels.alertname }} for {{ .CommonLabels.job }}
      {{- if gt (len .CommonLabels) (len .GroupLabels) -}}
        {{" "}}(
        {{- with .CommonLabels.Remove .GroupLabels.Names }}
          {{- range $index, $label := .SortedPairs -}}
            {{ if $index }}, {{ end }}
            {{- $label.Name }}="{{ $label.Value -}}"
          {{- end }}
        {{- end -}}
        )
      {{- end }}
    text: >-
      {{ range .Alerts -}}
      *Alert:* {{ .Annotations.title }}{{ if .Labels.severity }} - `{{ .Labels.severity }}`{{ end }}
      *Description:* {{ .Annotations.description }}
      *Details:*
      {{ range .Labels.SortedPairs }} • *{{ .Name }}:* `{{ .Value }}`
      {{ end }}
      {{ end }}
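
If you adapt this Alertmanager config, it can be validated before deployment with
`amtool`, which ships with Alertmanager (the file name `alertmanager.yml` below is an
assumption about where the config is saved):

```sh
# Parse and validate the Alertmanager configuration, including the Slack templates;
# reports syntax errors without starting Alertmanager.
amtool check-config alertmanager.yml
```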
@@ -0,0 +1,11 @@
groups:
- name: ExampleCPULoadGroup
  rules:
  - alert: HighCpuLoad
    expr: system_cpu_load_average_1m > 0.1
    for: 0m
    labels:
      severity: warning
    annotations:
      summary: High CPU load
      description: "CPU load is > 0.1\n VALUE = {{ $value }}\n LABELS = {{ $labels }}"
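
Rule files like this one can be linted with `promtool` from the Prometheus
distribution (the file name `rules.yml` below is an assumption):

```sh
# Check the alerting rules file for syntax errors; exits non-zero on failure.
promtool check rules rules.yml
```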
deploy/docker-swarm/clickhouse-setup/clickhouse-cluster.xml
75 changes: 75 additions & 0 deletions
@@ -0,0 +1,75 @@
<?xml version="1.0"?>
<clickhouse>
    <!-- ZooKeeper is used to store metadata about replicas when using Replicated tables.
         Optional. If you don't use replicated tables, you can omit this.
         See https://clickhouse.com/docs/en/engines/table-engines/mergetree-family/replication/
    -->
    <zookeeper>
        <node index="1">
            <host>zookeeper-1</host>
            <port>2181</port>
        </node>
        <!-- <node index="2">
            <host>zookeeper-2</host>
            <port>2181</port>
        </node>
        <node index="3">
            <host>zookeeper-3</host>
            <port>2181</port>
        </node> -->
    </zookeeper>

    <!-- Configuration of clusters that can be used in Distributed tables.
         https://clickhouse.com/docs/en/operations/table_engines/distributed/
    -->
    <remote_servers>
        <cluster>
            <!-- Inter-server per-cluster secret for Distributed queries.
                 Default: no secret (no authentication is performed).
                 If set, Distributed queries are validated on shards, so at least:
                 - such a cluster must exist on the shard,
                 - such a cluster must have the same secret.
                 More importantly, the initial_user is then used as the current user for the query.
                 Right now the protocol is fairly simple and only takes into account:
                 - cluster name
                 - query
                 It would also be nice if the following were implemented:
                 - source hostname (see interserver_http_host), though that depends on DNS;
                   an IP address could be used instead, but then it must be correct on the initiator node.
                 - target hostname / IP address (same notes as for the source hostname)
                 - time-based security tokens
            -->
            <!-- <secret></secret> -->
            <shard>
                <!-- Optional. Whether to write data to just one of the replicas. Default: false (write data to all replicas). -->
                <!-- <internal_replication>false</internal_replication> -->
                <!-- Optional. Shard weight when writing data. Default: 1. -->
                <!-- <weight>1</weight> -->
                <replica>
                    <host>clickhouse</host>
                    <port>9000</port>
                    <!-- Optional. Priority of the replica for load_balancing. Default: 1 (a lower value has higher priority). -->
                    <!-- <priority>1</priority> -->
                </replica>
            </shard>
            <!-- <shard>
                <replica>
                    <host>clickhouse-2</host>
                    <port>9000</port>
                </replica>
            </shard>
            <shard>
                <replica>
                    <host>clickhouse-3</host>
                    <port>9000</port>
                </replica>
            </shard> -->
        </cluster>
    </remote_servers>
</clickhouse>
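
Once the containers are up, you can confirm that ClickHouse loaded this cluster
definition by querying `system.clusters` (the container name `clickhouse` below is an
assumption, taken from the replica host configured above):

```sh
# Show the shards and replicas ClickHouse registered for the cluster named "cluster".
docker exec -it clickhouse clickhouse-client --query \
  "SELECT cluster, shard_num, replica_num, host_name, port FROM system.clusters WHERE cluster = 'cluster'"
```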