diff --git a/.craft.yml b/.craft.yml new file mode 100644 index 0000000000..4524afff10 --- /dev/null +++ b/.craft.yml @@ -0,0 +1,6 @@ +minVersion: "0.23.1" +changelogPolicy: auto +artifactProvider: + name: none +targets: + - name: github diff --git a/.dockerignore b/.dockerignore deleted file mode 100644 index 6fd1d2510b..0000000000 --- a/.dockerignore +++ /dev/null @@ -1,9 +0,0 @@ -.git -.gitignore -.dockerignore -Makefile -README.md -*.pyc -*.tar -docker-compose.yml -data/ diff --git a/.editorconfig b/.editorconfig new file mode 100644 index 0000000000..4069959185 --- /dev/null +++ b/.editorconfig @@ -0,0 +1,16 @@ +root = true + +[*] +charset = utf-8 +end_of_line = lf +indent_style = space +insert_final_newline = true + +[*.sh] +indent_size = 2 + +[*.yml] +indent_size = 2 + +[nginx/*.conf] +indent_style = tab diff --git a/.env b/.env new file mode 100644 index 0000000000..a7830cfc4c --- /dev/null +++ b/.env @@ -0,0 +1,12 @@ +COMPOSE_PROJECT_NAME=sentry_onpremise +SENTRY_EVENT_RETENTION_DAYS=90 +# You can either use a port number or an IP:PORT combo for SENTRY_BIND +# See https://docs.docker.com/compose/compose-file/#ports for more +SENTRY_BIND=9000 +# Set SENTRY_MAIL_HOST to a valid FQDN (host/domain name) to be able to send emails! +# SENTRY_MAIL_HOST=example.com +SENTRY_IMAGE=getsentry/sentry:nightly +SNUBA_IMAGE=getsentry/snuba:nightly +RELAY_IMAGE=getsentry/relay:nightly +SYMBOLICATOR_IMAGE=getsentry/symbolicator:nightly +WAL2JSON_VERSION=latest \ No newline at end of file diff --git a/.env.example b/.env.example deleted file mode 100644 index c560055d33..0000000000 --- a/.env.example +++ /dev/null @@ -1,3 +0,0 @@ -# Run `docker-compose run web config generate-secret-key` -# to get the SENTRY_SECRET_KEY value. 
-SENTRY_SECRET_KEY= diff --git a/.gitattributes b/.gitattributes new file mode 100644 index 0000000000..0beb4bb0ad --- /dev/null +++ b/.gitattributes @@ -0,0 +1,5 @@ +/.gitattributes export-ignore +/.gitignore export-ignore +/.github export-ignore +/.editorconfig export-ignore +/.craft.yml export-ignore diff --git a/.github/ISSUE_TEMPLATE/bug.yml b/.github/ISSUE_TEMPLATE/bug.yml new file mode 100644 index 0000000000..b1ec358625 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/bug.yml @@ -0,0 +1,46 @@ +name: 🐞 Bug Report +description: Tell us about something that's not working the way we (probably) intend. +body: + - type: input + id: version + attributes: + label: Version + placeholder: 21.7.0 ← should look like this (check the footer) + description: What version of self-hosted Sentry are you running? + validations: + required: true + - type: textarea + id: repro + attributes: + label: Steps to Reproduce + description: How can we see what you're seeing? Specific is terrific. + placeholder: |- + 1. foo + 2. bar + 3. baz + validations: + required: true + - type: textarea + id: expected + attributes: + label: Expected Result + validations: + required: true + - type: textarea + id: actual + attributes: + label: Actual Result + description: Logs? Screenshots? Yes, please. + placeholder: |- + e.g.: + - latest install logs: `ls -1 sentry_install_log-*.txt | tail -1 | xargs cat` + - `docker-compose logs` output + validations: + required: true + - type: markdown + attributes: + value: |- + ## Thanks πŸ™ + Check our [triage docs](https://open.sentry.io/triage/) for what to expect next. 
+ validations: + required: false diff --git a/.github/ISSUE_TEMPLATE/config.yml b/.github/ISSUE_TEMPLATE/config.yml new file mode 100644 index 0000000000..22655b71b5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/config.yml @@ -0,0 +1,8 @@ +blank_issues_enabled: false +contact_links: + - name: Question about self-hosting/on-premise + url: https://forum.sentry.io + about: Please use the community forums for questions + - name: Report a security vulnerability + url: https://sentry.io/security/#vulnerability-disclosure + about: Please see our guide for responsible disclosure. diff --git a/.github/ISSUE_TEMPLATE/feature.yml b/.github/ISSUE_TEMPLATE/feature.yml new file mode 100644 index 0000000000..49cfa3e1c5 --- /dev/null +++ b/.github/ISSUE_TEMPLATE/feature.yml @@ -0,0 +1,28 @@ +name: πŸ’‘ Feature Request +description: Tell us about a problem our software could solve but doesn't. +body: + - type: textarea + id: problem + attributes: + label: Problem Statement + description: What problem could `onpremise` solve that it doesn't? + placeholder: |- + I want to make whirled peas, but `onpremise` doesn't blend. + validations: + required: true + - type: textarea + id: expected + attributes: + label: Solution Brainstorm + description: We know you have bright ideas to share ... share away, friend. + placeholder: |- + Add a blender to `onpremise`. + validations: + required: false + - type: markdown + attributes: + value: |- + ## Thanks πŸ™ + Check our [triage docs](https://open.sentry.io/triage/) for what to expect next. 
+ validations: + required: false diff --git a/.github/workflows/issue-routing-helper.yml b/.github/workflows/issue-routing-helper.yml new file mode 100644 index 0000000000..a36179554e --- /dev/null +++ b/.github/workflows/issue-routing-helper.yml @@ -0,0 +1,46 @@ +name: Issue Routing Helper +on: + issues: + types: [labeled] +env: + # Use GH_RELEASE_PAT as github-actions bot is not allowed to ping teams + GH_TOKEN: ${{ secrets.GH_RELEASE_PAT }} + GH_REPO: ${{ github.repository }} +jobs: + route: + runs-on: ubuntu-latest + if: >- + github.event.issue.state == 'open' + && + startsWith(github.event.label.name, 'Team: ') + && + !contains(github.event.issue.labels.*.name, 'Status: Backlog') + && + !contains(github.event.issue.labels.*.name, 'Status: In Progress') + steps: + - name: "Ensure a single 'Team: *' label with 'Status: Untriaged'" + run: | + labels_to_remove=$(gh api --paginate "/repos/$GH_REPO/labels" -q '[.[].name | select((startswith("Team: ") or startswith("Status: ")) and . != "${{ github.event.label.name }}" and . 
!= "Status: Untriaged")] | join(",")') + gh issue edit ${{ github.event.issue.number }} --remove-label "$labels_to_remove" --add-label '${{ github.event.label.name }},Status: Untriaged' + - name: "Mention/ping assigned team for triage" + run: | + # Get team label mention name: + team_label='${{ github.event.label.name }}' + team_name="${team_label:6}" # Strip the first 6 chars, which is the 'Team: ' part + team_slug="${team_name// /-}" # Replace spaces with hyphens for url/slug friendliness + mention_slug=$(gh api "/orgs/getsentry/teams/$team_slug" -q .slug || true) + + if [[ -z "$mention_slug" ]]; then + echo "Couldn't find team mention from slug, trying the label description" + team_slug=$(gh api "/repos/$GH_REPO/labels/$team_label" -q '.description') + mention_slug=$(gh api "/orgs/getsentry/teams/$team_slug" -q .slug || true) + fi + + if [[ -n "$mention_slug" ]]; then + echo "Routing to @getsentry/$mention_slug for [triage](https://develop.sentry.dev/processing-tickets/#3-triage). ⏲️" > comment_body + else + echo "[Failed]($GITHUB_SERVER_URL/$GITHUB_REPOSITORY/actions/runs/$GITHUB_RUN_ID) to route to \`${{ github.event.label.name }}\`. πŸ˜•" > comment_body + echo "" >> comment_body + echo "Defaulting to @getsentry/open-source for [triage](https://develop.sentry.dev/processing-tickets/#3-triage). 
⏲️" >> comment_body + fi + gh issue comment ${{ github.event.issue.number }} --body-file comment_body diff --git a/.github/workflows/issue-status-helper.yml b/.github/workflows/issue-status-helper.yml new file mode 100644 index 0000000000..b5164750e8 --- /dev/null +++ b/.github/workflows/issue-status-helper.yml @@ -0,0 +1,16 @@ +name: Issue Status Helper +on: + issues: + types: [labeled] +jobs: + ensure_one_status: + runs-on: ubuntu-latest + if: "startsWith(github.event.label.name, 'Status: ')" + steps: + - name: "Ensure a single 'Status: *' label" + env: + GH_TOKEN: ${{ secrets.GITHUB_TOKEN }} + GH_REPO: ${{ github.repository }} + run: | + labels_to_remove=$(gh api --paginate "/repos/$GH_REPO/labels" -q '[.[].name | select(startswith("Status: ") and . != "${{ github.event.label.name }}")] | join(",")') + gh issue edit ${{ github.event.issue.number }} --remove-label "$labels_to_remove" --add-label "${{ github.event.label.name }}" \ No newline at end of file diff --git a/.github/workflows/lock.yml b/.github/workflows/lock.yml new file mode 100644 index 0000000000..5d57d6f4f2 --- /dev/null +++ b/.github/workflows/lock.yml @@ -0,0 +1,16 @@ +name: 'Lock closed issues/PRs' +on: + schedule: + - cron: '11 3 * * *' + workflow_dispatch: +jobs: + lock: + runs-on: ubuntu-latest + steps: + - uses: getsentry/forked-action-lock-threads@master + with: + github-token: ${{ github.token }} + issue-lock-inactive-days: 15 + issue-lock-reason: '' + pr-lock-inactive-days: 15 + pr-lock-reason: '' diff --git a/.github/workflows/release.yml b/.github/workflows/release.yml new file mode 100644 index 0000000000..1d2afb65ee --- /dev/null +++ b/.github/workflows/release.yml @@ -0,0 +1,32 @@ +name: Release +on: + workflow_dispatch: + inputs: + version: + description: Version to release (optional) + required: false + force: + description: Force a release even when there are release-blockers (optional) + required: false + schedule: + # We want the release to be at 10 or 11am Pacific Time + # We 
also make this an hour after all others such as Sentry, + # Snuba, and Relay to make sure their releases finish. + - cron: "0 18 15 * *" +jobs: + release: + runs-on: ubuntu-latest + name: "Release a new version" + steps: + - uses: actions/checkout@v2 + with: + token: ${{ secrets.GH_RELEASE_PAT }} + fetch-depth: 0 + - name: Prepare release + uses: getsentry/action-prepare-release@v1 + env: + GITHUB_TOKEN: ${{ secrets.GH_RELEASE_PAT }} + with: + version: ${{ github.event.inputs.version }} + force: ${{ github.event.inputs.force }} + calver: true diff --git a/.github/workflows/stale.yml b/.github/workflows/stale.yml new file mode 100644 index 0000000000..0a47156580 --- /dev/null +++ b/.github/workflows/stale.yml @@ -0,0 +1,47 @@ +name: 'Close stale issues/PRs' +on: + schedule: + - cron: '* */6 * * *' + workflow_dispatch: +jobs: + stale: + runs-on: ubuntu-latest + steps: + - uses: actions/stale@87c2b794b9b47a9bec68ae03c01aeb572ffebdb1 + with: + repo-token: ${{ github.token }} + days-before-stale: 21 + days-before-close: 7 + only-labels: "" + operations-per-run: 100 + remove-stale-when-updated: true + debug-only: false + ascending: false + + exempt-issue-labels: "Status: Backlog,Status: In Progress" + stale-issue-label: "Status: Stale" + stale-issue-message: |- + This issue has gone three weeks without activity. In another week, I will close it. + + But! If you comment or otherwise update it, I will reset the clock, and if you label it `Status: Backlog` or `Status: In Progress`, I will leave it alone ... forever! + + ---- + + "A weed is but an unloved flower." ― _Ella Wheeler Wilcox_ πŸ₯€ + skip-stale-issue-message: false + close-issue-label: "" + close-issue-message: "" + + exempt-pr-labels: "Status: Backlog,Status: In Progress" + stale-pr-label: "Status: Stale" + stale-pr-message: |- + This pull request has gone three weeks without activity. In another week, I will close it. + + But! 
If you comment or otherwise update it, I will reset the clock, and if you label it `Status: Backlog` or `Status: In Progress`, I will leave it alone ... forever! + + ---- + + "A weed is but an unloved flower." ― _Ella Wheeler Wilcox_ πŸ₯€ + skip-stale-pr-message: false + close-pr-label: + close-pr-message: "" diff --git a/.github/workflows/test.yml b/.github/workflows/test.yml new file mode 100644 index 0000000000..0d3db0e286 --- /dev/null +++ b/.github/workflows/test.yml @@ -0,0 +1,67 @@ +name: Test +on: + # Run CI on all pushes to the master and release/** branches, and on all new + # pull requests, and on all pushes to pull requests (even if a pull request + # is not against master). + push: + branches: + - "master" + - "release/**" + pull_request: +defaults: + run: + shell: bash +jobs: + unit-test: + runs-on: ubuntu-20.04 + name: "unit tests" + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Unit Tests + working-directory: install + run: find ./ -type f -name "*-test.sh" -exec "./{}" \; + + integration-test: + runs-on: ubuntu-20.04 + name: "integration test" + strategy: + max-parallel: 1 + fail-fast: false + matrix: + include: + - compose_version: '1.28.0' + compose_path: '/usr/local/bin' + - compose_version: 'v2.0.1' + compose_path: '/usr/local/lib/docker/cli-plugins' + steps: + - name: Checkout + uses: actions/checkout@v2 + + - name: Get Compose + run: | + # Always remove `docker compose` support as that's the newer version + # and comes installed by default nowadays. 
+ sudo rm -f "/usr/local/lib/docker/cli-plugins/docker-compose" + sudo rm -f "${{ matrix.compose_path }}/docker-compose" + sudo mkdir -p "${{ matrix.compose_path }}" + sudo curl -L https://github.com/docker/compose/releases/download/${{ matrix.compose_version }}/docker-compose-`uname -s`-`uname -m` -o "${{ matrix.compose_path }}/docker-compose" + sudo chmod +x "${{ matrix.compose_path }}/docker-compose" + + - name: Integration Test + run: | + echo "Testing initial install" + ./install.sh + ./_integration-test/run.sh + echo "Testing in-place upgrade" + # Also test plugin installation here + echo "sentry-auth-oidc" >> sentry/requirements.txt + ./install.sh --minimize-downtime + ./_integration-test/run.sh + + - name: Inspect failure + if: failure() + run: | + docker compose ps + docker compose logs diff --git a/.gitignore b/.gitignore index 5ed7352fd9..da5404445a 100644 --- a/.gitignore +++ b/.gitignore @@ -36,6 +36,7 @@ var/ # Installer logs pip-log.txt pip-delete-this-directory.txt +sentry_install_log*.txt # Unit test / coverage reports htmlcov/ @@ -45,7 +46,7 @@ htmlcov/ .cache nosetests.xml coverage.xml -*,cover +*.cover .hypothesis/ # Translations @@ -71,8 +72,25 @@ target/ # https://docs.docker.com/compose/extends/ docker-compose.override.yml -# env config -.env - *.tar data/ +.vscode/tags + +# custom Sentry config +sentry/sentry.conf.py +sentry/config.yml +sentry/*.bak +sentry/requirements.txt +relay/credentials.json +relay/config.yml +symbolicator/config.yml +geoip/GeoIP.conf +geoip/*.mmdb +geoip/.geoipupdate.lock + +# wal2json download +postgres/wal2json + +# integration testing +_integration-test/custom-ca-roots/nginx/* +sentry/test-custom-ca-roots.py diff --git a/CHANGELOG.md b/CHANGELOG.md new file mode 100644 index 0000000000..c6defdd4ca --- /dev/null +++ b/CHANGELOG.md @@ -0,0 +1,50 @@ +# Changelog + +## 21.10.0 + +### Support for Docker Compose v2 (ongoing) + +You asked for it and you did it! 
Sentry self-hosted now can work with Docker Compose v2 thanks to our community's contributions. + +PRs: #1116 + +### Various fixes & improvements + +- docs: simplify Linux `sudo` instructions in README (#1096) +- build: Set master version to nightly (58874cf9) + +## 21.9.0 + +- fix(healthcheck): Increase retries to 5 (#1072) +- fix(requirements): Make compose version check bw-compatible (#1068) +- ci: Test with the required minimum docker-compose (#1066) + Run tests using docker-compose `1.28.0` instead of latest +- fix(clickhouse): Use correct HTTP port for healthcheck (#1069) + Fixes the regular `Unexpected packet` errors in Clickhouse + +## 21.8.0 + +- feat: Support custom CA roots ([#27062](https://github.com/getsentry/sentry/pull/27062)), see the [docs](https://develop.sentry.dev/self-hosted/custom-ca-roots/) for more details. +- fix: Fix `curl` image to version 7.77.0 +- upgrade: docker-compose version to 1.29.2 +- feat: Leverage health checks for depends_on + +## 21.7.0 + +- No documented changes. + +## 21.6.3 + +- No documented changes. + +## 21.6.2 + +- BREAKING CHANGE: The frontend bundle will be loaded asynchronously (via [#25744](https://github.com/getsentry/sentry/pull/25744)). This is a breaking change that can affect custom plugins that access certain globals in the django template. Please see https://forum.sentry.io/t/breaking-frontend-changes-for-custom-plugins/14184 for more information. + +## 21.6.1 + +- No documented changes. + +## 21.6.0 + +- feat: Add healthchecks for redis, memcached and postgres (#975) diff --git a/Dockerfile b/Dockerfile deleted file mode 100644 index d1d7cb3a46..0000000000 --- a/Dockerfile +++ /dev/null @@ -1 +0,0 @@ -FROM sentry:9.1.2-onbuild diff --git a/LICENSE b/LICENSE new file mode 100644 index 0000000000..4e414943d8 --- /dev/null +++ b/LICENSE @@ -0,0 +1,104 @@ +Business Source License 1.1 + +Parameters + +Licensor: Functional Software, Inc. 
+Licensed Work: Sentry + The Licensed Work is (c) 2019 Functional Software, Inc. +Additional Use Grant: You may make use of the Licensed Work, provided that you do + not use the Licensed Work for an Application Monitoring + Service. + + An "Application Monitoring Service" is a commercial offering + that allows third parties (other than your employees and + contractors) to access the functionality of the Licensed + Work so that such third parties directly benefit from the + error-reporting or application monitoring features of the + Licensed Work. + +Change Date: 2024-10-15 + +Change License: Apache License, Version 2.0 + +For information about alternative licensing arrangements for the Software, +please visit: https://sentry.io/pricing/ + +Notice + +The Business Source License (this document, or the "License") is not an Open +Source license. However, the Licensed Work will eventually be made available +under an Open Source License, as stated in this License. + +License text copyright (c) 2017 MariaDB Corporation Ab, All Rights Reserved. +"Business Source License" is a trademark of MariaDB Corporation Ab. + +----------------------------------------------------------------------------- + +Business Source License 1.1 + +Terms + +The Licensor hereby grants you the right to copy, modify, create derivative +works, redistribute, and make non-production use of the Licensed Work. The +Licensor may make an Additional Use Grant, above, permitting limited +production use. + +Effective on the Change Date, or the fourth anniversary of the first publicly +available distribution of a specific version of the Licensed Work under this +License, whichever comes first, the Licensor hereby grants you rights under +the terms of the Change License, and the rights granted in the paragraph +above terminate. 
+ +If your use of the Licensed Work does not comply with the requirements +currently in effect as described in this License, you must purchase a +commercial license from the Licensor, its affiliated entities, or authorized +resellers, or you must refrain from using the Licensed Work. + +All copies of the original and modified Licensed Work, and derivative works +of the Licensed Work, are subject to this License. This License applies +separately for each version of the Licensed Work and the Change Date may vary +for each version of the Licensed Work released by Licensor. + +You must conspicuously display this License on each original or modified copy +of the Licensed Work. If you receive the Licensed Work in original or +modified form from a third party, the terms and conditions set forth in this +License apply to your use of that work. + +Any use of the Licensed Work in violation of this License will automatically +terminate your rights under this License for the current and all other +versions of the Licensed Work. + +This License does not grant you any right in any trademark or logo of +Licensor or its affiliates (provided that you may use a trademark or logo of +Licensor as expressly required by this License). + +TO THE EXTENT PERMITTED BY APPLICABLE LAW, THE LICENSED WORK IS PROVIDED ON +AN "AS IS" BASIS. LICENSOR HEREBY DISCLAIMS ALL WARRANTIES AND CONDITIONS, +EXPRESS OR IMPLIED, INCLUDING (WITHOUT LIMITATION) WARRANTIES OF +MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE, NON-INFRINGEMENT, AND +TITLE. + +MariaDB hereby grants you permission to use this License’s text to license +your works, and to refer to it using the trademark "Business Source License", +as long as you comply with the Covenants of Licensor below. 
+ +Covenants of Licensor + +In consideration of the right to use this License’s text and the "Business +Source License" name and trademark, Licensor covenants to MariaDB, and to all +other recipients of the licensed work to be provided by Licensor: + +1. To specify as the Change License the GPL Version 2.0 or any later version, + or a license that is compatible with GPL Version 2.0 or a later version, + where "compatible" means that software provided under the Change License can + be included in a program with software provided under GPL Version 2.0 or a + later version. Licensor may specify additional Change Licenses without + limitation. + +2. To either: (a) specify an additional grant of rights to use that does not + impose any additional restriction on the right granted in this License, as + the Additional Use Grant; or (b) insert the text "None". + +3. To specify a Change Date. + +4. Not to modify this License in any other way. diff --git a/Makefile b/Makefile deleted file mode 100644 index 57d2721e19..0000000000 --- a/Makefile +++ /dev/null @@ -1,21 +0,0 @@ -REPOSITORY?=sentry-onpremise -TAG?=latest - -OK_COLOR=\033[32;01m -NO_COLOR=\033[0m - -build: - @echo "$(OK_COLOR)==>$(NO_COLOR) Building $(REPOSITORY):$(TAG)" - @docker build --rm -t $(REPOSITORY):$(TAG) . - -$(REPOSITORY)_$(TAG).tar: build - @echo "$(OK_COLOR)==>$(NO_COLOR) Saving $(REPOSITORY):$(TAG) > $@" - @docker save $(REPOSITORY):$(TAG) > $@ - -push: build - @echo "$(OK_COLOR)==>$(NO_COLOR) Pushing $(REPOSITORY):$(TAG)" - @docker push $(REPOSITORY):$(TAG) - -all: build push - -.PHONY: all build push diff --git a/README.md b/README.md index abbd0172f8..5c93853f16 100644 --- a/README.md +++ b/README.md @@ -1,61 +1,50 @@ -# Sentry On-Premise +# Self-Hosted Sentry nightly Official bootstrap for running your own [Sentry](https://sentry.io/) with [Docker](https://www.docker.com/). 
## Requirements - * Docker 1.10.0+ - * Compose 1.6.0+ _(optional)_ +* Docker 19.03.6+ +* Compose 1.28.0+ +* 4 CPU Cores +* 8 GB RAM +* 20 GB Free Disk Space -## Up and Running +## Setup -Assuming you've just cloned this repository, the following steps -will get you up and running in no time! +To get started with all the defaults, simply clone the repo and run `./install.sh` in your local check-out. Sentry uses Python 3 by default since December 4th, 2020 and Sentry 21.1.0 is the last version to support Python 2. -There may need to be modifications to the included `docker-compose.yml` file to accommodate your needs or your environment. These instructions are a guideline for what you should generally do. +During the install, a prompt will ask if you want to create a user account. If you require that the install not be blocked by the prompt, run `./install.sh --no-user-prompt`. -1. `docker volume create --name=sentry-data && docker volume create --name=sentry-postgres` - Make our local database and sentry volumes - Docker volumes have to be created manually, as they are declared as external to be more durable. -2. `cp -n .env.example .env` - create env config file -3. `docker-compose build` - Build and tag the Docker services -4. `docker-compose run --rm web config generate-secret-key` - Generate a secret key. - Add it to `.env` as `SENTRY_SECRET_KEY`. -5. `docker-compose run --rm web upgrade` - Build the database. - Use the interactive prompts to create a user account. -6. `docker-compose up -d` - Lift all services (detached/background mode). -7. Access your instance at `localhost:9000`! +Please visit [our documentation](https://develop.sentry.dev/self-hosted/) for everything else. -## Securing Sentry with SSL/TLS +## Tips & Tricks -If you'd like to protect your Sentry install with SSL/TLS, there are -fantastic SSL/TLS proxies like [HAProxy](http://www.haproxy.org/) -and [Nginx](http://nginx.org/). 
+### Event Retention -## Updating Sentry +Sentry comes with a cleanup cron job that prunes events older than `90 days` by default. If you want to change that, you can change the `SENTRY_EVENT_RETENTION_DAYS` environment variable in `.env` or simply override it in your environment. If you do not want the cleanup cron, you can remove the `sentry-cleanup` service from the `docker-compose.yml`file. -Updating Sentry using Compose is relatively simple. Just use the following steps to update. Make sure that you have the latest version set in your Dockerfile. Or use the latest version of this repository. +### Installing a specific SHA -Use the following steps after updating this repository or your Dockerfile: -```sh -docker-compose build # Build the services again after updating -docker-compose run --rm web upgrade # Run new migrations -docker-compose up -d # Recreate the services +If you want to install a specific release of Sentry, use the tags/releases on this repo. + +We continously push the Docker image for each commit made into [Sentry](https://github.com/getsentry/sentry), and other services such as [Snuba](https://github.com/getsentry/snuba) or [Symbolicator](https://github.com/getsentry/symbolicator) to [our Docker Hub](https://hub.docker.com/u/getsentry) and tag the latest version on master as `:nightly`. This is also usually what we have on sentry.io and what the install script uses. You can use a custom Sentry image, such as a modified version that you have built on your own, or simply a specific commit hash by setting the `SENTRY_IMAGE` environment variable to that image name before running `./install.sh`: + +```shell +SENTRY_IMAGE=getsentry/sentry:83b1380 ./install.sh ``` -## Running sentry cleanup +Note that this may not work for all commit SHAs as this repository evolves with Sentry and its satellite projects. It is highly recommended to check out a version of this repository that is close to the timestamp of the Sentry commit you are installing. 
+ +### Using Linux -To free up DB space etc, run `sentry cleanup --days 90 --concurrency 25`. Steps to do so: -* Build docker containers locally by running `docker-compose build` -* make sure the correct AWS envs vars are set (e.g., run `./htprod` to set aws keys) -* run `docker run -it --entrypoint bash sentry-onpremise_cron` -* copy the env vars from ECS tasks and convert them into `export foo=bar` strings -* run `sentry cleanup --days 90` and make sure you have access to reources in the vpc (e.g., run `sshuttle -r vpn-prod 172.23.0.0/16 -v`) -* concurrency needs to be set or cleanup will take a very long time +If you are using Linux and you need to use `sudo` when running `./install.sh`, make sure to place the environment variable *after* `sudo`: + +```shell +sudo SENTRY_IMAGE=us.gcr.io/sentryio/sentry:83b1380 ./install.sh +``` -## Resources +Where you replace `83b1380` with the sha you want to use. - * [Documentation](https://docs.sentry.io/server/installation/docker/) - * [Bug Tracker](https://github.com/getsentry/onpremise) - * [Forums](https://forum.sentry.io/c/on-premise) - * [IRC](irc://chat.freenode.net/sentry) (chat.freenode.net, #sentry) - * [Docker Image](https://hub.docker.com/_/sentry/) +[build-status-image]: https://github.com/getsentry/onpremise/workflows/test/badge.svg +[build-status-url]: https://git.io/JUYkh diff --git a/_integration-test/custom-ca-roots/docker-compose.test.yml b/_integration-test/custom-ca-roots/docker-compose.test.yml new file mode 100644 index 0000000000..2bc40ba1b1 --- /dev/null +++ b/_integration-test/custom-ca-roots/docker-compose.test.yml @@ -0,0 +1,12 @@ +version: '3.4' +services: + fixture-custom-ca-roots: + image: nginx:1.21.0-alpine + restart: unless-stopped + volumes: + - ./_integration-test/custom-ca-roots/nginx:/etc/nginx:ro + networks: + default: + aliases: + - self.test + - fail.test diff --git a/_integration-test/custom-ca-roots/nginx/nginx.conf b/_integration-test/custom-ca-roots/nginx/nginx.conf new file mode 
100644 index 0000000000..517aea4102 --- /dev/null +++ b/_integration-test/custom-ca-roots/nginx/nginx.conf @@ -0,0 +1,32 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + +events { + worker_connections 1024; +} + +http { + server { + listen 443 ssl; + server_name "self.test"; + ssl_certificate "/etc/nginx/self.test.crt"; + ssl_certificate_key "/etc/nginx/self.test.key"; + location / { + add_header Content-Type text/plain; + return 200 'ok'; + } + } + server { + listen 443 ssl; + server_name "fake.test"; + ssl_certificate "/etc/nginx/fake.test.crt"; + ssl_certificate_key "/etc/nginx/fake.test.key"; + location / { + add_header Content-Type text/plain; + return 200 'bad'; + } + } +} diff --git a/_integration-test/custom-ca-roots/setup.sh b/_integration-test/custom-ca-roots/setup.sh new file mode 100755 index 0000000000..a8cb2f1615 --- /dev/null +++ b/_integration-test/custom-ca-roots/setup.sh @@ -0,0 +1,47 @@ +#! /usr/bin/env bash +set -e + +export COMPOSE_FILE="../docker-compose.yml:./custom-ca-roots/docker-compose.test.yml" + +TEST_NGINX_CONF_PATH="./custom-ca-roots/nginx" +CUSTOM_CERTS_PATH="../certificates" + +# generate tightly constrained CA +# NB: `-addext` requires LibreSSL 3.1.0+, or OpenSSL (brew install openssl) +openssl req -x509 -new -nodes -newkey rsa:2048 -keyout $TEST_NGINX_CONF_PATH/ca.key \ +-sha256 -days 1 -out $TEST_NGINX_CONF_PATH/ca.crt -batch \ +-subj "/CN=TEST CA *DO NOT TRUST*" \ +-addext "keyUsage = critical, keyCertSign, cRLSign" \ +-addext "nameConstraints = critical, permitted;DNS:self.test" + +## Lines like the following are debug helpers ... 
+# openssl x509 -in nginx/ca.crt -text -noout + +mkdir -p $CUSTOM_CERTS_PATH +cp $TEST_NGINX_CONF_PATH/ca.crt $CUSTOM_CERTS_PATH/test-custom-ca-roots.crt + +# generate server certificate +openssl req -new -nodes -newkey rsa:2048 -keyout $TEST_NGINX_CONF_PATH/self.test.key \ +-addext "subjectAltName=DNS:self.test" \ +-out $TEST_NGINX_CONF_PATH/self.test.req -batch -subj "/CN=Self Signed with CA Test Server" + +# openssl req -in nginx/self.test.req -text -noout + +openssl x509 -req -in $TEST_NGINX_CONF_PATH/self.test.req -CA $TEST_NGINX_CONF_PATH/ca.crt -CAkey $TEST_NGINX_CONF_PATH/ca.key \ +-extfile <(printf "subjectAltName=DNS:self.test") \ +-CAcreateserial -out $TEST_NGINX_CONF_PATH/self.test.crt -days 1 -sha256 + +# openssl x509 -in nginx/self.test.crt -text -noout + +# sanity check that signed certificate passes OpenSSL's validation +openssl verify -CAfile $TEST_NGINX_CONF_PATH/ca.crt $TEST_NGINX_CONF_PATH/self.test.crt + +# self signed certificate, for sanity check of not just accepting all certs +openssl req -x509 -newkey rsa:2048 -nodes -days 1 -keyout $TEST_NGINX_CONF_PATH/fake.test.key \ +-out $TEST_NGINX_CONF_PATH/fake.test.crt -addext "subjectAltName=DNS:fake.test" -subj "/CN=Self Signed Test Server" + +# openssl x509 -in nginx/fake.test.crt -text -noout + +cp ./custom-ca-roots/test.py ../sentry/test-custom-ca-roots.py + +$dc up -d fixture-custom-ca-roots diff --git a/_integration-test/custom-ca-roots/teardown.sh b/_integration-test/custom-ca-roots/teardown.sh new file mode 100755 index 0000000000..059f69b93b --- /dev/null +++ b/_integration-test/custom-ca-roots/teardown.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +$dc rm -s -f -v fixture-custom-ca-roots +rm -f ../certificates/test-custom-ca-roots.crt ../sentry/test-custom-ca-roots.py +unset COMPOSE_FILE diff --git a/_integration-test/custom-ca-roots/test.py b/_integration-test/custom-ca-roots/test.py new file mode 100644 index 0000000000..0f9b501f83 --- /dev/null +++ 
b/_integration-test/custom-ca-roots/test.py @@ -0,0 +1,15 @@ +import unittest +import requests + + +class CustomCATests(unittest.TestCase): + def test_valid_self_signed(self): + self.assertEqual(requests.get("https://self.test").text, 'ok') + + def test_invalid_self_signed(self): + with self.assertRaises(requests.exceptions.SSLError): + requests.get("https://fail.test") + + +if __name__ == '__main__': + unittest.main() diff --git a/_integration-test/run.sh b/_integration-test/run.sh new file mode 100755 index 0000000000..67ec747cd3 --- /dev/null +++ b/_integration-test/run.sh @@ -0,0 +1,128 @@ +#!/usr/bin/env bash +set -ex + +source "$(dirname $0)/../install/_lib.sh" + +echo "${_group}Setting up variables and helpers ..." +export SENTRY_TEST_HOST="${SENTRY_TEST_HOST:-http://localhost:9000}" +TEST_USER='test@example.com' +TEST_PASS='test123TEST' +COOKIE_FILE=$(mktemp) + +# Courtesy of https://stackoverflow.com/a/2183063/90297 +trap_with_arg() { + func="$1" ; shift + for sig ; do + trap "$func $sig "'$LINENO' "$sig" + done +} + +DID_CLEAN_UP=0 +# the cleanup function will be the exit point +cleanup () { + if [ "$DID_CLEAN_UP" -eq 1 ]; then + return 0; + fi + DID_CLEAN_UP=1 + + if [ "$1" != "EXIT" ]; then + echo "An error occurred, caught SIG$1 on line $2"; + fi + + echo "Cleaning up..." + rm $COOKIE_FILE + echo "Done." +} +trap_with_arg cleanup ERR INT TERM EXIT +echo "${_endgroup}" + +echo "${_group}Starting Sentry for tests ..." +# Disable beacon for e2e tests +echo 'SENTRY_BEACON=False' >> $SENTRY_CONFIG_PY +$dcr web createuser --superuser --email $TEST_USER --password $TEST_PASS || true +$dc up -d +printf "Waiting for Sentry to be up"; timeout 60 bash -c 'until $(curl -Isf -o /dev/null $SENTRY_TEST_HOST); do printf '.'; sleep 0.5; done' +echo "" +echo "${_endgroup}" + +echo "${_group}Running tests ..." 
+get_csrf_token () { awk '$6 == "sc" { print $7 }' $COOKIE_FILE; } +sentry_api_request () { curl -s -H 'Accept: application/json; charset=utf-8' -H "Referer: $SENTRY_TEST_HOST" -H 'Content-Type: application/json' -H "X-CSRFToken: $(get_csrf_token)" -b "$COOKIE_FILE" -c "$COOKIE_FILE" "$SENTRY_TEST_HOST/api/0/$1" ${@:2}; } + +login () { + INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null $SENTRY_TEST_HOST -w %{url_effective}) + if [ "$INITIAL_AUTH_REDIRECT" != "$SENTRY_TEST_HOST/auth/login/sentry/" ]; then + echo "Initial /auth/login/ redirect failed, exiting..." + echo "$INITIAL_AUTH_REDIRECT" + exit -1 + fi + + CSRF_TOKEN_FOR_LOGIN=$(curl $SENTRY_TEST_HOST -sL -c "$COOKIE_FILE" | awk -F "['\"]" ' + /csrfmiddlewaretoken/ { + print $4 "=" $6; + exit; + }') + + curl -sL --data-urlencode 'op=login' --data-urlencode "username=$TEST_USER" --data-urlencode "password=$TEST_PASS" --data-urlencode "$CSRF_TOKEN_FOR_LOGIN" "$SENTRY_TEST_HOST/auth/login/sentry/" -H "Referer: $SENTRY_TEST_HOST/auth/login/sentry/" -b "$COOKIE_FILE" -c "$COOKIE_FILE"; +} + +LOGIN_RESPONSE=$(login); +declare -a LOGIN_TEST_STRINGS=( + '"isAuthenticated":true' + '"username":"test@example.com"' + '"isSuperuser":true' +) +for i in "${LOGIN_TEST_STRINGS[@]}" +do + echo "Testing '$i'..." + echo "$LOGIN_RESPONSE" | grep "$i[,}]" >& /dev/null + echo "Pass." +done +echo "${_endgroup}" + +echo "${_group}Running moar tests !!!" 
+# Set up initial/required settings (InstallWizard request) +sentry_api_request "internal/options/?query=is:required" -X PUT --data '{"mail.use-tls":false,"mail.username":"","mail.port":25,"system.admin-email":"ben@byk.im","mail.password":"","system.url-prefix":"'"$SENTRY_TEST_HOST"'","auth.allow-registration":false,"beacon.anonymous":true}' > /dev/null + +SENTRY_DSN=$(sentry_api_request "projects/sentry/internal/keys/" | awk 'BEGIN { RS=",|:{\n"; FS="\""; } $2 == "public" && $4 ~ "^http" { print $4; exit; }') +# We ignore the protocol and the host as we already know those +DSN_PIECES=(`echo $SENTRY_DSN | sed -ne 's|^https\{0,1\}://\([0-9a-z]\{1,\}\)@[^/]\{1,\}/\([0-9]\{1,\}\)$|\1 \2|p' | tr ' ' '\n'`) +SENTRY_KEY=${DSN_PIECES[0]} +PROJECT_ID=${DSN_PIECES[1]} + +TEST_EVENT_ID=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-f0-9" | head -c 32) +# Thanks @untitaker - https://forum.sentry.io/t/how-can-i-post-with-curl-a-sentry-event-which-authentication-credentials/4759/2?u=byk +echo "Creating test event..." +curl -sf --data '{"event_id": "'"$TEST_EVENT_ID"'","level":"error","message":"a failure","extra":{"object":"42"}}' -H 'Content-Type: application/json' -H "X-Sentry-Auth: Sentry sentry_version=7, sentry_key=$SENTRY_KEY, sentry_client=test-bash/0.1" "$SENTRY_TEST_HOST/api/$PROJECT_ID/store/" -o /dev/null + +EVENT_PATH="projects/sentry/internal/events/$TEST_EVENT_ID/" +export -f sentry_api_request get_csrf_token +export SENTRY_TEST_HOST COOKIE_FILE EVENT_PATH +printf "Getting the test event back" +timeout 30 bash -c 'until $(sentry_api_request "$EVENT_PATH" -Isf -X GET -o /dev/null); do printf '.'; sleep 0.5; done' +echo " got it!"; + +EVENT_RESPONSE=$(sentry_api_request "$EVENT_PATH") +declare -a EVENT_TEST_STRINGS=( + '"eventID":"'"$TEST_EVENT_ID"'"' + '"message":"a failure"' + '"title":"a failure"' + '"object":"42"' +) +for i in "${EVENT_TEST_STRINGS[@]}" +do + echo "Testing '$i'..." + echo "$EVENT_RESPONSE" | grep "$i[,}]" >& /dev/null + echo "Pass." 
+done +echo "${_endgroup}" + +echo "${_group}Ensure cleanup crons are working ..." +$dc ps | grep -q -E -e '\-cleanup\s+running\s+' -e '\-cleanup[_-].+\s+Up\s+' +echo "${_endgroup}" + +echo "${_group}Test custom CAs work ..." +source ./custom-ca-roots/setup.sh +$dcr --no-deps web python3 /etc/sentry/test-custom-ca-roots.py +source ./custom-ca-roots/teardown.sh +echo "${_endgroup}" diff --git a/certificates/.gitignore b/certificates/.gitignore new file mode 100644 index 0000000000..30d0607b1f --- /dev/null +++ b/certificates/.gitignore @@ -0,0 +1,3 @@ +# Add all custom CAs in this folder +* +!.gitignore diff --git a/clickhouse/config.xml b/clickhouse/config.xml new file mode 100644 index 0000000000..55cdbbd82b --- /dev/null +++ b/clickhouse/config.xml @@ -0,0 +1,10 @@ + + + + information + 1 + + + 1 + + diff --git a/config.yml b/config.yml deleted file mode 100644 index 8a0844595c..0000000000 --- a/config.yml +++ /dev/null @@ -1,63 +0,0 @@ -# While a lot of configuration in Sentry can be changed via the UI, for all -# new-style config (as of 8.0) you can also declare values here in this file -# to enforce defaults or to ensure they cannot be changed via the UI. For more -# information see the Sentry documentation. - -############### -# Mail Server # -############### - -# mail.backend: 'smtp' # Use dummy if you want to disable email entirely -# mail.host: 'localhost' -# mail.port: 25 -# mail.username: '' -# mail.password: '' -# mail.use-tls: false -# The email address to send on behalf of -# mail.from: 'root@localhost' - -# If you'd like to configure email replies, enable this. 
-# mail.enable-replies: false - -# When email-replies are enabled, this value is used in the Reply-To header -# mail.reply-hostname: '' - -# If you're using mailgun for inbound mail, set your API key and configure a -# route to forward to /api/hooks/mailgun/inbound/ -# mail.mailgun-api-key: '' - -################### -# System Settings # -################### - -# If this file ever becomes compromised, it's important to regenerate your a new key -# Changing this value will result in all current sessions being invalidated. -# A new key can be generated with `$ sentry config generate-secret-key` -# system.secret-key: 'changeme' - -# The ``redis.clusters`` setting is used, unsurprisingly, to configure Redis -# clusters. These clusters can be then referred to by name when configuring -# backends such as the cache, digests, or TSDB backend. -# redis.clusters: -# default: -# hosts: -# 0: -# host: 127.0.0.1 -# port: 6379 - -################ -# File storage # -################ - -# Uploaded media uses these `filestore` settings. The available -# backends are either `filesystem` or `s3`. 
- -# filestore.backend: 'filesystem' -# filestore.options: -# location: '/tmp/sentry-files' - -# filestore.backend: 's3' -# filestore.options: -# access_key: 'AKIXXXXXX' -# secret_key: 'XXXXXXX' -# bucket_name: 's3-bucket-name' diff --git a/cron/Dockerfile b/cron/Dockerfile new file mode 100644 index 0000000000..0e6e76e9e8 --- /dev/null +++ b/cron/Dockerfile @@ -0,0 +1,6 @@ +ARG BASE_IMAGE +FROM ${BASE_IMAGE} +RUN apt-get update && apt-get install -y --no-install-recommends cron && \ + rm -r /var/lib/apt/lists/* +COPY entrypoint.sh /entrypoint.sh +ENTRYPOINT ["/entrypoint.sh"] diff --git a/cron/entrypoint.sh b/cron/entrypoint.sh new file mode 100755 index 0000000000..383c8b29c7 --- /dev/null +++ b/cron/entrypoint.sh @@ -0,0 +1,19 @@ +#!/usr/bin/env bash + +if [ "$(ls -A /usr/local/share/ca-certificates/)" ]; then + update-ca-certificates +fi + +# Prior art: +# - https://git.io/fjNOg +# - https://blog.knoldus.com/running-a-cron-job-in-docker-container/ + +declare -p | grep -Ev 'BASHOPTS|BASH_VERSINFO|EUID|PPID|SHELLOPTS|UID' > /container.env + +{ for cron_job in "$@"; do echo -e "SHELL=/bin/bash +BASH_ENV=/container.env +${cron_job} > /proc/1/fd/1 2>/proc/1/fd/2"; done } \ + | sed --regexp-extended 's/\\(.)/\1/g' \ + | crontab - +crontab -l +exec cron -f -l -L 15 diff --git a/docker-compose.yml b/docker-compose.yml index 68348e20ee..29342e4abc 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -1,66 +1,379 @@ -# NOTE: This docker-compose.yml is meant to be just an example of how -# you could accomplish this on your own. It is not intended to work in -# all use-cases and must be adapted to fit your needs. This is merely -# a guideline. - -# See docs.getsentry.com/on-premise/server/ for full -# instructions - -version: '3.4' - -x-defaults: &defaults +x-restart-policy: &restart_policy restart: unless-stopped - build: . 
+x-depends_on-healthy: &depends_on-healthy + condition: service_healthy +x-depends_on-default: &depends_on-default + condition: service_started +x-healthcheck-defaults: &healthcheck_defaults + # Avoid setting the interval too small, as docker uses much more CPU than one would expect. + # Related issues: + # https://github.com/moby/moby/issues/39102 + # https://github.com/moby/moby/issues/39388 + # https://github.com/getsentry/onpremise/issues/1000 + interval: 30s + timeout: 5s + retries: 5 + start_period: 10s +x-sentry-defaults: &sentry_defaults + <<: *restart_policy + image: "$SENTRY_IMAGE" depends_on: - - redis - - postgres - - memcached - - smtp - env_file: .env + redis: + <<: *depends_on-healthy + kafka: + <<: *depends_on-healthy + postgres: + <<: *depends_on-healthy + memcached: + <<: *depends_on-default + smtp: + <<: *depends_on-default + snuba-api: + <<: *depends_on-default + snuba-consumer: + <<: *depends_on-default + snuba-outcomes-consumer: + <<: *depends_on-default + snuba-sessions-consumer: + <<: *depends_on-default + snuba-transactions-consumer: + <<: *depends_on-default + snuba-subscription-consumer-events: + <<: *depends_on-default + snuba-subscription-consumer-transactions: + <<: *depends_on-default + snuba-replacer: + <<: *depends_on-default + symbolicator: + <<: *depends_on-default + entrypoint: "/etc/sentry/entrypoint.sh" + command: ["run", "web"] environment: - SENTRY_MEMCACHED_HOST: memcached - SENTRY_REDIS_HOST: redis - SENTRY_POSTGRES_HOST: postgres - SENTRY_EMAIL_HOST: smtp + PYTHONUSERBASE: "/data/custom-packages" + SENTRY_CONF: "/etc/sentry" + SNUBA: "http://snuba-api:1218" + # Force everything to use the system CA bundle + # This is mostly needed to support installing custom CA certs + # This one is used by botocore + DEFAULT_CA_BUNDLE: &ca_bundle "/etc/ssl/certs/ca-certificates.crt" + # This one is used by requests + REQUESTS_CA_BUNDLE: *ca_bundle + # This one is used by grpc/google modules + GRPC_DEFAULT_SSL_ROOTS_FILE_PATH_ENV_VAR: 
*ca_bundle + # Leaving the value empty to just pass whatever is set + # on the host system (or in the .env file) + SENTRY_EVENT_RETENTION_DAYS: + SENTRY_MAIL_HOST: volumes: - - sentry-data:/var/lib/sentry/files - - + - "sentry-data:/data" + - "./sentry:/etc/sentry" + - "./geoip:/geoip:ro" + - "./certificates:/usr/local/share/ca-certificates:ro" +x-snuba-defaults: &snuba_defaults + <<: *restart_policy + depends_on: + clickhouse: + <<: *depends_on-healthy + kafka: + <<: *depends_on-healthy + redis: + <<: *depends_on-healthy + image: "$SNUBA_IMAGE" + environment: + SNUBA_SETTINGS: docker + CLICKHOUSE_HOST: clickhouse + DEFAULT_BROKERS: "kafka:9092" + REDIS_HOST: redis + UWSGI_MAX_REQUESTS: "10000" + UWSGI_DISABLE_LOGGING: "true" + # Leaving the value empty to just pass whatever is set + # on the host system (or in the .env file) + SENTRY_EVENT_RETENTION_DAYS: services: smtp: - restart: unless-stopped + <<: *restart_policy image: tianon/exim4 - + hostname: ${SENTRY_MAIL_HOST:-''} + volumes: + - "sentry-smtp:/var/spool/exim4" + - "sentry-smtp-log:/var/log/exim4" memcached: - restart: unless-stopped - image: memcached:1.5-alpine - + <<: *restart_policy + image: "memcached:1.6.9-alpine" + healthcheck: + <<: *healthcheck_defaults + # From: https://stackoverflow.com/a/31877626/5155484 + test: echo stats | nc 127.0.0.1 11211 redis: - restart: unless-stopped - image: redis:3.2-alpine - + <<: *restart_policy + image: "redis:6.2.4-alpine" + healthcheck: + <<: *healthcheck_defaults + test: redis-cli ping + volumes: + - "sentry-redis:/data" + ulimits: + nofile: + soft: 10032 + hard: 10032 postgres: - restart: unless-stopped - image: postgres:9.5 + <<: *restart_policy + image: "postgres:9.6" + healthcheck: + <<: *healthcheck_defaults + # Using default user "postgres" from sentry/sentry.conf.example.py or value of POSTGRES_USER if provided + test: ["CMD-SHELL", "pg_isready -U ${POSTGRES_USER:-postgres}"] + command: + [ + "postgres", + "-c", + "wal_level=logical", + "-c", + 
"max_replication_slots=1", + "-c", + "max_wal_senders=1", + ] + environment: + POSTGRES_HOST_AUTH_METHOD: "trust" + entrypoint: /opt/sentry/postgres-entrypoint.sh + volumes: + - "sentry-postgres:/var/lib/postgresql/data" + - type: bind + read_only: true + source: ./postgres/ + target: /opt/sentry/ + zookeeper: + <<: *restart_policy + image: "confluentinc/cp-zookeeper:5.5.0" + environment: + ZOOKEEPER_CLIENT_PORT: "2181" + CONFLUENT_SUPPORT_METRICS_ENABLE: "false" + ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: "WARN" + ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: "WARN" + KAFKA_OPTS: "-Dzookeeper.4lw.commands.whitelist=ruok" + volumes: + - "sentry-zookeeper:/var/lib/zookeeper/data" + - "sentry-zookeeper-log:/var/lib/zookeeper/log" + - "sentry-secrets:/etc/zookeeper/secrets" + healthcheck: + <<: *healthcheck_defaults + test: + ["CMD-SHELL", 'echo "ruok" | nc -w 2 -q 2 localhost 2181 | grep imok'] + kafka: + <<: *restart_policy + depends_on: + zookeeper: + <<: *depends_on-healthy + image: "confluentinc/cp-kafka:5.5.0" + environment: + KAFKA_ZOOKEEPER_CONNECT: "zookeeper:2181" + KAFKA_ADVERTISED_LISTENERS: "PLAINTEXT://kafka:9092" + KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: "1" + KAFKA_OFFSETS_TOPIC_NUM_PARTITIONS: "1" + KAFKA_LOG_RETENTION_HOURS: "24" + KAFKA_MESSAGE_MAX_BYTES: "50000000" #50MB or bust + KAFKA_MAX_REQUEST_SIZE: "50000000" #50MB on requests apparently too + CONFLUENT_SUPPORT_METRICS_ENABLE: "false" + KAFKA_LOG4J_LOGGERS: "kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN" + KAFKA_LOG4J_ROOT_LOGLEVEL: "WARN" + KAFKA_TOOLS_LOG4J_LOGLEVEL: "WARN" + volumes: + - "sentry-kafka:/var/lib/kafka/data" + - "sentry-kafka-log:/var/lib/kafka/log" + - "sentry-secrets:/etc/kafka/secrets" + healthcheck: + <<: *healthcheck_defaults + test: ["CMD-SHELL", "nc -z localhost 9092"] + clickhouse: + <<: *restart_policy + image: "yandex/clickhouse-server:20.3.9.70" + ulimits: + nofile: + soft: 262144 + hard: 
262144 volumes: - - sentry-postgres:/var/lib/postgresql/data - + - "sentry-clickhouse:/var/lib/clickhouse" + - "sentry-clickhouse-log:/var/log/clickhouse-server" + - type: bind + read_only: true + source: ./clickhouse/config.xml + target: /etc/clickhouse-server/config.d/sentry.xml + environment: + # This limits Clickhouse's memory to 30% of the host memory + # If you have high volume and your search return incomplete results + # You might want to change this to a higher value (and ensure your host has enough memory) + MAX_MEMORY_USAGE_RATIO: 0.3 + healthcheck: + test: + [ + "CMD-SHELL", + "wget -nv -t1 --spider 'http://localhost:8123/' || exit 1", + ] + interval: 3s + timeout: 600s + retries: 200 + geoipupdate: + image: "maxmindinc/geoipupdate:v4.7.1" + # Override the entrypoint in order to avoid using envvars for config. + # Futz with settings so we can keep mmdb and conf in same dir on host + # (image looks for them in separate dirs by default). + entrypoint: + ["/usr/bin/geoipupdate", "-d", "/sentry", "-f", "/sentry/GeoIP.conf"] + volumes: + - "./geoip:/sentry" + snuba-api: + <<: *snuba_defaults + # Kafka consumer responsible for feeding events into Clickhouse + snuba-consumer: + <<: *snuba_defaults + command: consumer --storage errors --auto-offset-reset=latest --max-batch-time-ms 750 + # Kafka consumer responsible for feeding outcomes into Clickhouse + # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data + # since we did not do a proper migration + snuba-outcomes-consumer: + <<: *snuba_defaults + command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750 + # Kafka consumer responsible for feeding session data into Clickhouse + snuba-sessions-consumer: + <<: *snuba_defaults + command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750 + # Kafka consumer responsible for feeding transactions data into Clickhouse + snuba-transactions-consumer: + <<: *snuba_defaults + command: 
consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750 --commit-log-topic=snuba-commit-log + snuba-replacer: + <<: *snuba_defaults + command: replacer --storage errors --auto-offset-reset=latest --max-batch-size 3 + snuba-subscription-consumer-events: + <<: *snuba_defaults + command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-events-subscriptions-consumers --topic=events --result-topic=events-subscription-results --dataset=events --commit-log-topic=snuba-commit-log --commit-log-group=snuba-consumers --delay-seconds=60 --schedule-ttl=60 + snuba-subscription-consumer-transactions: + <<: *snuba_defaults + command: subscriptions --auto-offset-reset=latest --consumer-group=snuba-transactions-subscriptions-consumers --topic=events --result-topic=transactions-subscription-results --dataset=transactions --commit-log-topic=snuba-commit-log --commit-log-group=transactions_group --delay-seconds=60 --schedule-ttl=60 + snuba-cleanup: + <<: *snuba_defaults + image: snuba-cleanup-onpremise-local + build: + context: ./cron + args: + BASE_IMAGE: "$SNUBA_IMAGE" + command: '"*/5 * * * * gosu snuba snuba cleanup --storage errors --dry-run False"' + snuba-transactions-cleanup: + <<: *snuba_defaults + image: snuba-cleanup-onpremise-local + build: + context: ./cron + args: + BASE_IMAGE: "$SNUBA_IMAGE" + command: '"*/5 * * * * gosu snuba snuba cleanup --storage transactions --dry-run False"' + symbolicator: + <<: *restart_policy + image: "$SYMBOLICATOR_IMAGE" + volumes: + - "sentry-symbolicator:/data" + - type: bind + read_only: true + source: ./symbolicator + target: /etc/symbolicator + command: run -c /etc/symbolicator/config.yml + symbolicator-cleanup: + <<: *restart_policy + image: symbolicator-cleanup-onpremise-local + build: + context: ./cron + args: + BASE_IMAGE: "$SYMBOLICATOR_IMAGE" + command: '"55 23 * * * gosu symbolicator symbolicator cleanup"' + volumes: + - "sentry-symbolicator:/data" web: - 
<<: *defaults - ports: - - '9000:9000' - + <<: *sentry_defaults + healthcheck: + <<: *healthcheck_defaults + test: + - "CMD" + - "/bin/bash" + - '-c' + # Courtesy of https://unix.stackexchange.com/a/234089/108960 + - 'exec 3<>/dev/tcp/127.0.0.1/9000 && echo -e "GET /_health/ HTTP/1.1\r\nhost: 127.0.0.1\r\n\r\n" >&3 && grep ok -s -m 1 <&3' cron: - <<: *defaults + <<: *sentry_defaults command: run cron - worker: - <<: *defaults + <<: *sentry_defaults command: run worker - - + ingest-consumer: + <<: *sentry_defaults + command: run ingest-consumer --all-consumer-types + post-process-forwarder: + <<: *sentry_defaults + # Increase `--commit-batch-size 1` below to deal with high-load environments. + command: run post-process-forwarder --commit-batch-size 1 + subscription-consumer-events: + <<: *sentry_defaults + command: run query-subscription-consumer --commit-batch-size 1 --topic events-subscription-results + subscription-consumer-transactions: + <<: *sentry_defaults + command: run query-subscription-consumer --commit-batch-size 1 --topic transactions-subscription-results + sentry-cleanup: + <<: *sentry_defaults + image: sentry-cleanup-onpremise-local + build: + context: ./cron + args: + BASE_IMAGE: "$SENTRY_IMAGE" + entrypoint: "/entrypoint.sh" + command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"' + nginx: + <<: *restart_policy + ports: + - "$SENTRY_BIND:80/tcp" + image: "nginx:1.21.0-alpine" + volumes: + - type: bind + read_only: true + source: ./nginx + target: /etc/nginx + depends_on: + - web + - relay + relay: + <<: *restart_policy + image: "$RELAY_IMAGE" + volumes: + - type: bind + read_only: true + source: ./relay + target: /work/.relay + - type: bind + read_only: true + source: ./geoip + target: /geoip + depends_on: + kafka: + <<: *depends_on-healthy + redis: + <<: *depends_on-healthy + web: + <<: *depends_on-healthy volumes: - sentry-data: - external: true - sentry-postgres: - external: true + sentry-data: + external: true + 
sentry-postgres: + external: true + sentry-redis: + external: true + sentry-zookeeper: + external: true + sentry-kafka: + external: true + sentry-clickhouse: + external: true + sentry-symbolicator: + external: true + sentry-secrets: + sentry-smtp: + sentry-zookeeper-log: + sentry-kafka-log: + sentry-smtp-log: + sentry-clickhouse-log: diff --git a/geoip/GeoLite2-City.mmdb.empty b/geoip/GeoLite2-City.mmdb.empty new file mode 100644 index 0000000000..94f6921fd1 Binary files /dev/null and b/geoip/GeoLite2-City.mmdb.empty differ diff --git a/install.sh b/install.sh new file mode 100755 index 0000000000..3886463061 --- /dev/null +++ b/install.sh @@ -0,0 +1,29 @@ +#!/usr/bin/env bash +set -e +if [[ -n "$MSYSTEM" ]]; then + echo "Seems like you are using an MSYS2-based system (such as Git Bash) which is not supported. Please use WSL instead."; + exit 1 +fi + +source "$(dirname $0)/install/_lib.sh" # does a `cd .../install/`, among other things + +source parse-cli.sh +source error-handling.sh +source check-minimum-requirements.sh +source create-docker-volumes.sh +source ensure-files-from-examples.sh +source generate-secret-key.sh +source replace-tsdb.sh +source update-docker-images.sh +source build-docker-images.sh +source turn-things-off.sh +source set-up-zookeeper.sh +source install-wal2json.sh +source bootstrap-snuba.sh +source create-kafka-topics.sh +source upgrade-postgres.sh +source set-up-and-migrate-database.sh +source migrate-file-storage.sh +source relay-credentials.sh +source geoip.sh +source wrap-up.sh diff --git a/install/_lib.sh b/install/_lib.sh new file mode 100644 index 0000000000..70f73c6e07 --- /dev/null +++ b/install/_lib.sh @@ -0,0 +1,50 @@ +set -euo pipefail +test "${DEBUG:-}" && set -x + +# Thanks to https://unix.stackexchange.com/a/145654/108960 +log_file="sentry_install_log-`date +'%Y-%m-%d_%H-%M-%S'`.txt" +exec &> >(tee -a "$log_file") + +# Work from /install/ for install.sh, project root otherwise +if [[ "$(basename $0)" = "install.sh" ]]; then + 
cd "$(dirname $0)/install/" +else + cd "$(dirname $0)" # assume we're a test script or some such +fi + +_ENV="$(realpath ../.env)" + +# Read .env for default values with a tip o' the hat to https://stackoverflow.com/a/59831605/90297 +t=$(mktemp) && export -p > "$t" && set -a && . $_ENV && set +a && . "$t" && rm "$t" && unset t + +if [ "${GITHUB_ACTIONS:-}" = "true" ]; then + _group="::group::" + _endgroup="::endgroup::" +else + _group="β–Ά " + _endgroup="" +fi + +dc_base="$(docker compose version >/dev/null && echo 'docker compose' || echo 'docker-compose')" +dc="$dc_base --ansi never" +dcr="$dc run --rm" + +# A couple of the config files are referenced from other subscripts, so they +# get vars, while multiple subscripts call ensure_file_from_example. +function ensure_file_from_example { + if [[ -f "$1" ]]; then + echo "$1 already exists, skipped creation." + else + echo "Creating $1..." + cp -n $(echo "$1" | sed 's/\.[^.]*$/.example&/') "$1" + # sed from https://stackoverflow.com/a/25123013/90297 + fi +} +SENTRY_CONFIG_PY='../sentry/sentry.conf.py' +SENTRY_CONFIG_YML='../sentry/config.yml' + +# Increase the default 10 second SIGTERM timeout +# to ensure celery queues are properly drained +# between upgrades as task signatures may change across +# versions +STOP_TIMEOUT=60 # seconds diff --git a/install/_min-requirements.sh b/install/_min-requirements.sh new file mode 100644 index 0000000000..fb1b8a1a00 --- /dev/null +++ b/install/_min-requirements.sh @@ -0,0 +1,7 @@ +# Don't forget to update the README and othes docs when you change these! 
+MIN_DOCKER_VERSION='19.03.6' +MIN_COMPOSE_VERSION='1.28.0' +MIN_RAM_HARD=3800 # MB +MIN_RAM_SOFT=7800 # MB +MIN_CPU_HARD=2 +MIN_CPU_SOFT=4 diff --git a/install/_test_setup.sh b/install/_test_setup.sh new file mode 100644 index 0000000000..6fdf29eecb --- /dev/null +++ b/install/_test_setup.sh @@ -0,0 +1,52 @@ +set -euo pipefail +source "$(dirname $0)/_lib.sh" + +rm -rf /tmp/sentry-onpremise-test-sandbox.* +_SANDBOX="$(mktemp -d /tmp/sentry-onpremise-test-sandbox.XXX)" + +report_success() { + echo "$(basename $0) - Success πŸ‘" +} + +teardown() { + test "${DEBUG:-}" || rm -rf "$_SANDBOX" +} + +setup() { + cd .. + + # Clone the local repo into a temp dir. FWIW `git clone --local` breaks for + # me because it depends on hard-linking, which doesn't work across devices, + # and I happen to have my workspace and /tmp on separate devices. + git -c advice.detachedHead=false clone --depth=1 "file://$(pwd)" "$_SANDBOX" + + # Now propagate any local changes from the working copy to the sandbox. This + # provides a pretty nice dev experience: edit the files in the working copy, + # then run `DEBUG=1 some-test.sh` to leave the sandbox up for interactive + # dev/debugging. + git status --porcelain | while read line; do + # $line here is something like `M some-script.sh`. + + local filepath="$(cut -f2 -d' ' <(echo $line))" + local filestatus="$(cut -f1 -d' ' <(echo $line))" + + case $filestatus in + D) + rm "$_SANDBOX/$filepath" + ;; + A | M | AM | ??) + ln -sf "$(realpath $filepath)" "$_SANDBOX/$filepath" + ;; + **) + echo "Wuh? $line" + exit 77 + ;; + esac + done + + cd "$_SANDBOX/install" + + trap teardown EXIT +} + +setup diff --git a/install/bootstrap-snuba.sh b/install/bootstrap-snuba.sh new file mode 100644 index 0000000000..2952ed0b33 --- /dev/null +++ b/install/bootstrap-snuba.sh @@ -0,0 +1,6 @@ +echo "${_group}Bootstrapping and migrating Snuba ..." 
+ +$dcr snuba-api bootstrap --no-migrate --force +$dcr snuba-api migrations migrate --force + +echo "${_endgroup}" diff --git a/install/build-docker-images.sh b/install/build-docker-images.sh new file mode 100644 index 0000000000..4bb96b5ea2 --- /dev/null +++ b/install/build-docker-images.sh @@ -0,0 +1,8 @@ +echo "${_group}Building and tagging Docker images ..." + +echo "" +$dc build --force-rm +echo "" +echo "Docker images built." + +echo "${_endgroup}" diff --git a/install/check-minimum-requirements.sh b/install/check-minimum-requirements.sh new file mode 100644 index 0000000000..fc564a2c52 --- /dev/null +++ b/install/check-minimum-requirements.sh @@ -0,0 +1,54 @@ +echo "${_group}Checking minimum requirements ..." + +source "$(dirname $0)/_min-requirements.sh" + +DOCKER_VERSION=$(docker version --format '{{.Server.Version}}') +# Get semantic version of Docker Compose v2 +if docker compose version >/dev/null; then + COMPOSE_VERSION=$(docker compose version --short | sed 's/v\{0,1\}\(.\{1,\}\)/\1/') +else + # Do NOT use $dc instead of `docker-compose` below as older versions don't support certain options and fail + COMPOSE_VERSION=$(docker-compose --version | sed 's/docker-compose version \(.\{1,\}\),.*/\1/') +fi +RAM_AVAILABLE_IN_DOCKER=$(docker run --rm busybox free -m 2>/dev/null | awk '/Mem/ {print $2}'); +CPU_AVAILABLE_IN_DOCKER=$(docker run --rm busybox nproc --all); + +# Compare dot-separated strings - function below is inspired by https://stackoverflow.com/a/37939589/808368 +function ver () { echo "$@" | awk -F. 
'{ printf("%d%03d%03d", $1,$2,$3); }'; } + +if [[ "$(ver $DOCKER_VERSION)" -lt "$(ver $MIN_DOCKER_VERSION)" ]]; then + echo "FAIL: Expected minimum Docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION" + exit 1 +fi + +if [[ "$(ver $COMPOSE_VERSION)" -lt "$(ver $MIN_COMPOSE_VERSION)" ]]; then + echo "FAIL: Expected minimum docker-compose version to be $MIN_COMPOSE_VERSION but found $COMPOSE_VERSION" + exit 1 +fi + +if [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_HARD" ]]; then + echo "FAIL: Required minimum CPU cores available to Docker is $MIN_CPU_HARD, found $CPU_AVAILABLE_IN_DOCKER" + exit 1 +elif [[ "$CPU_AVAILABLE_IN_DOCKER" -lt "$MIN_CPU_SOFT" ]]; then + echo "WARN: Recommended minimum CPU cores available to Docker is $MIN_CPU_SOFT, found $CPU_AVAILABLE_IN_DOCKER" +fi + +if [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_HARD" ]]; then + echo "FAIL: Required minimum RAM available to Docker is $MIN_RAM_HARD MB, found $RAM_AVAILABLE_IN_DOCKER MB" + exit 1 +elif [[ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM_SOFT" ]]; then + echo "WARN: Recommended minimum RAM available to Docker is $MIN_RAM_SOFT MB, found $RAM_AVAILABLE_IN_DOCKER MB" +fi + +#SSE4.2 required by Clickhouse (https://clickhouse.yandex/docs/en/operations/requirements/) +# On KVM, cpuinfo could falsely not report SSE 4.2 support, so skip the check. https://github.com/ClickHouse/ClickHouse/issues/20#issuecomment-226849297 +IS_KVM=$(docker run --rm busybox grep -c 'Common KVM processor' /proc/cpuinfo || :) +if [[ "$IS_KVM" -eq 0 ]]; then + SUPPORTS_SSE42=$(docker run --rm busybox grep -c sse4_2 /proc/cpuinfo || :) + if [[ "$SUPPORTS_SSE42" -eq 0 ]]; then + echo "FAIL: The CPU your machine is running on does not support the SSE 4.2 instruction set, which is required for one of the services Sentry uses (Clickhouse). See https://git.io/JvLDt for more info." 
+ exit 1 + fi +fi + +echo "${_endgroup}" diff --git a/install/create-docker-volumes-test.sh b/install/create-docker-volumes-test.sh new file mode 100755 index 0000000000..7cf8969ee0 --- /dev/null +++ b/install/create-docker-volumes-test.sh @@ -0,0 +1,20 @@ +#!/usr/bin/env bash +source "$(dirname $0)/_test_setup.sh" + +expected=7 +count() { + docker volume ls --quiet | grep '^sentry-.*' | wc -l +} + +# Maybe they exist prior, maybe they don't. Script is idempotent. + +before=$(count) +test $before -eq 0 || test $before -eq $expected + +source create-docker-volumes.sh +source create-docker-volumes.sh +source create-docker-volumes.sh + +test $(count) -eq $expected + +report_success diff --git a/install/create-docker-volumes.sh b/install/create-docker-volumes.sh new file mode 100644 index 0000000000..ca3ef0b23e --- /dev/null +++ b/install/create-docker-volumes.sh @@ -0,0 +1,11 @@ +echo "${_group}Creating volumes for persistent storage ..." + +echo "Created $(docker volume create --name=sentry-clickhouse)." +echo "Created $(docker volume create --name=sentry-data)." +echo "Created $(docker volume create --name=sentry-kafka)." +echo "Created $(docker volume create --name=sentry-postgres)." +echo "Created $(docker volume create --name=sentry-redis)." +echo "Created $(docker volume create --name=sentry-symbolicator)." +echo "Created $(docker volume create --name=sentry-zookeeper)." + +echo "${_endgroup}" diff --git a/install/create-kafka-topics.sh b/install/create-kafka-topics.sh new file mode 100644 index 0000000000..a542cb54d3 --- /dev/null +++ b/install/create-kafka-topics.sh @@ -0,0 +1,14 @@ +echo "${_group}Creating additional Kafka topics ..." 
+ +# NOTE: This step relies on `kafka` being available from the previous `snuba-api bootstrap` step +# XXX(BYK): We cannot use auto.create.topics as Confluence and Apache hates it now (and makes it very hard to enable) +EXISTING_KAFKA_TOPICS=$($dcr kafka kafka-topics --list --bootstrap-server kafka:9092 2>/dev/null) +NEEDED_KAFKA_TOPICS="ingest-attachments ingest-transactions ingest-events" +for topic in $NEEDED_KAFKA_TOPICS; do + if ! echo "$EXISTING_KAFKA_TOPICS" | grep -wq $topic; then + $dcr kafka kafka-topics --create --topic $topic --bootstrap-server kafka:9092 + echo "" + fi +done + +echo "${_endgroup}" diff --git a/install/ensure-files-from-examples.sh b/install/ensure-files-from-examples.sh new file mode 100644 index 0000000000..17958a01b4 --- /dev/null +++ b/install/ensure-files-from-examples.sh @@ -0,0 +1,8 @@ +echo "${_group}Ensuring files from examples ..." + +ensure_file_from_example $SENTRY_CONFIG_PY +ensure_file_from_example $SENTRY_CONFIG_YML +ensure_file_from_example '../symbolicator/config.yml' +ensure_file_from_example '../sentry/requirements.txt' + +echo "${_endgroup}" diff --git a/install/error-handling.sh b/install/error-handling.sh new file mode 100644 index 0000000000..d25ee01fa0 --- /dev/null +++ b/install/error-handling.sh @@ -0,0 +1,35 @@ +echo "${_group}Setting up error handling ..." + +# Courtesy of https://stackoverflow.com/a/2183063/90297 +trap_with_arg() { + func="$1" ; shift + for sig ; do + trap "$func $sig "'$LINENO' "$sig" + done +} + +DID_CLEAN_UP=0 +# the cleanup function will be the exit point +cleanup () { + if [[ "$DID_CLEAN_UP" -eq 1 ]]; then + return 0; + fi + DID_CLEAN_UP=1 + + if [[ "$1" != "EXIT" ]]; then + echo "An error occurred, caught SIG$1 on line $2"; + + if [[ -n "$MINIMIZE_DOWNTIME" ]]; then + echo "*NOT* cleaning up, to clean your environment run \"docker-compose stop\"." + else + echo "Cleaning up..." 
+ fi + fi + + if [[ -z "$MINIMIZE_DOWNTIME" ]]; then + $dc stop -t $STOP_TIMEOUT &> /dev/null + fi +} +trap_with_arg cleanup ERR INT TERM EXIT + +echo "${_endgroup}" diff --git a/install/generate-secret-key.sh b/install/generate-secret-key.sh new file mode 100644 index 0000000000..de2afbaafd --- /dev/null +++ b/install/generate-secret-key.sh @@ -0,0 +1,12 @@ +echo "${_group}Generating secret key ..." + +if grep -xq "system.secret-key: '!!changeme!!'" $SENTRY_CONFIG_YML ; then + # This is to escape the secret key to be used in sed below + # Note the need to set LC_ALL=C due to BSD tr and sed always trying to decode + # whatever is passed to them. Kudos to https://stackoverflow.com/a/23584470/90297 + SECRET_KEY=$(export LC_ALL=C; head /dev/urandom | tr -dc "a-z0-9@#%^&*(-_=+)" | head -c 50 | sed -e 's/[\/&]/\\&/g') + sed -i -e 's/^system.secret-key:.*$/system.secret-key: '"'$SECRET_KEY'"'/' $SENTRY_CONFIG_YML + echo "Secret key written to $SENTRY_CONFIG_YML" +fi + +echo "${_endgroup}" diff --git a/install/geoip-test.sh b/install/geoip-test.sh new file mode 100755 index 0000000000..3d61c11e3d --- /dev/null +++ b/install/geoip-test.sh @@ -0,0 +1,16 @@ +#!/usr/bin/env bash +source "$(dirname $0)/_test_setup.sh" + +mmdb="../geoip/GeoLite2-City.mmdb" + +# Starts with no mmdb, ends up with empty. +test ! -f $mmdb +source geoip.sh +diff -rub $mmdb $mmdb.empty + +# Doesn't clobber existing, though. +echo GARBAGE > $mmdb +source geoip.sh +test "$(cat $mmdb)" = "GARBAGE" + +report_success diff --git a/install/geoip.sh b/install/geoip.sh new file mode 100644 index 0000000000..bc5d84b64a --- /dev/null +++ b/install/geoip.sh @@ -0,0 +1,38 @@ +echo "${_group}Setting up GeoIP integration ..." + +install_geoip() { + cd ../geoip + + local mmdb='GeoLite2-City.mmdb' + local conf='GeoIP.conf' + local result='Done' + + echo "Setting up IP address geolocation ..." + if [[ ! -f "$mmdb" ]]; then + echo -n "Installing (empty) IP address geolocation database ... 
" + cp "$mmdb.empty" "$mmdb" + echo "done." + else + echo "IP address geolocation database already exists." + fi + + if [[ ! -f "$conf" ]]; then + echo "IP address geolocation is not configured for updates." + echo "See https://develop.sentry.dev/self-hosted/geolocation/ for instructions." + result='Error' + else + echo "IP address geolocation is configured for updates." + echo "Updating IP address geolocation database ... " + if ! $dcr geoipupdate; then + result='Error' + fi + echo "$result updating IP address geolocation database." + fi + echo "$result setting up IP address geolocation." + + cd ../install +} + +install_geoip + +echo "${_endgroup}" diff --git a/install/install-wal2json.sh b/install/install-wal2json.sh new file mode 100644 index 0000000000..2973c0034d --- /dev/null +++ b/install/install-wal2json.sh @@ -0,0 +1,38 @@ +echo "${_group}Downloading and installing wal2json ..." + +FILE_TO_USE="../postgres/wal2json/wal2json.so" +ARCH=$(uname -m) +FILE_NAME="wal2json-Linux-$ARCH-glibc.so" + +docker_curl() { + # The environment variables can be specified in lower case or upper case. + # The lower case version has precedence. http_proxy is an exception as it is only available in lower case. + docker run --rm -e http_proxy -e https_proxy -e HTTPS_PROXY -e no_proxy -e NO_PROXY curlimages/curl:7.77.0 "$@" +} + +if [[ $WAL2JSON_VERSION == "latest" ]]; then + VERSION=$( + docker_curl https://api.github.com/repos/getsentry/wal2json/releases/latest | + grep '"tag_name":' | + sed -E 's/.*"([^"]+)".*/\1/' + ) + + if [[ ! $VERSION ]]; then + echo "Cannot find wal2json latest version" + exit 1 + fi +else + VERSION=$WAL2JSON_VERSION +fi + +mkdir -p ../postgres/wal2json +if [ ! 
-f "../postgres/wal2json/$VERSION/$FILE_NAME" ]; then + mkdir -p "../postgres/wal2json/$VERSION" + docker_curl -L \ + "https://github.com/getsentry/wal2json/releases/download/$VERSION/$FILE_NAME" \ + > "../postgres/wal2json/$VERSION/$FILE_NAME" +fi +cp "../postgres/wal2json/$VERSION/$FILE_NAME" "$FILE_TO_USE" + + +echo "${_endgroup}" diff --git a/install/migrate-file-storage.sh b/install/migrate-file-storage.sh new file mode 100644 index 0000000000..8623faef51 --- /dev/null +++ b/install/migrate-file-storage.sh @@ -0,0 +1,11 @@ +echo "${_group}Migrating file storage ..." + +SENTRY_DATA_NEEDS_MIGRATION=$(docker run --rm -v sentry-data:/data alpine ash -c "[ ! -d '/data/files' ] && ls -A1x /data | wc -l || true") +if [[ -n "$SENTRY_DATA_NEEDS_MIGRATION" ]]; then + # Use the web (Sentry) image so the file owners are kept as sentry:sentry + # The `\"` escape pattern is to make this compatible w/ Git Bash on Windows. See #329. + $dcr --entrypoint \"/bin/bash\" web -c \ + "mkdir -p /tmp/files; mv /data/* /tmp/files/; mv /tmp/files /data/files; chown -R sentry:sentry /data" +fi + +echo "${_endgroup}" diff --git a/install/parse-cli.sh b/install/parse-cli.sh new file mode 100644 index 0000000000..f1b6218589 --- /dev/null +++ b/install/parse-cli.sh @@ -0,0 +1,32 @@ +echo "${_group}Parsing command line ..." + +show_help() { + cat < $cfg +echo MOAR GARBAGE > $creds +source relay-credentials.sh +test "$(cat $cfg)" = "GARBAGE" +test "$(cat $creds)" = "MOAR GARBAGE" + +report_success diff --git a/install/relay-credentials.sh b/install/relay-credentials.sh new file mode 100644 index 0000000000..2d62e2bf53 --- /dev/null +++ b/install/relay-credentials.sh @@ -0,0 +1,25 @@ +echo "${_group}Generating Relay credentials ..." + +RELAY_CONFIG_YML="../relay/config.yml" +RELAY_CREDENTIALS_JSON="../relay/credentials.json" + +ensure_file_from_example $RELAY_CONFIG_YML + +if [[ ! 
-f "$RELAY_CREDENTIALS_JSON" ]]; then + + # We need the ugly hack below as `relay generate credentials` tries to read + # the config and the credentials even with the `--stdout` and `--overwrite` + # flags and then errors out when the credentials file exists but not valid + # JSON. We hit this case as we redirect output to the same config folder, + # creating an empty credentials file before relay runs. + + $dcr \ + --no-deps \ + --volume "$(pwd)/$RELAY_CONFIG_YML:/tmp/config.yml" \ + relay --config /tmp credentials generate --stdout \ + > "$RELAY_CREDENTIALS_JSON" + + echo "Relay credentials written to $RELAY_CREDENTIALS_JSON" +fi + +echo "${_endgroup}" diff --git a/install/replace-tsdb.sh b/install/replace-tsdb.sh new file mode 100644 index 0000000000..0716bc148b --- /dev/null +++ b/install/replace-tsdb.sh @@ -0,0 +1,46 @@ +echo "${_group}Replacing TSDB ..." + +replace_tsdb() { + if ( + [[ -f "$SENTRY_CONFIG_PY" ]] && + ! grep -xq 'SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"' "$SENTRY_CONFIG_PY" + ); then + # Do NOT indent the following string as it would be reflected in the end result, + # breaking the final config file. See getsentry/onpremise#624. + tsdb_settings="\ +SENTRY_TSDB = \"sentry.tsdb.redissnuba.RedisSnubaTSDB\" + +# Automatic switchover 90 days after $(date). Can be removed afterwards. +SENTRY_TSDB_OPTIONS = {\"switchover_timestamp\": $(date +%s) + (90 * 24 * 3600)}\ +" + + if grep -q 'SENTRY_TSDB_OPTIONS = ' "$SENTRY_CONFIG_PY"; then + echo "Not attempting automatic TSDB migration due to presence of SENTRY_TSDB_OPTIONS" + else + echo "Attempting to automatically migrate to new TSDB" + # Escape newlines for sed + tsdb_settings="${tsdb_settings//$'\n'/\\n}" + cp "$SENTRY_CONFIG_PY" "$SENTRY_CONFIG_PY.bak" + sed -i -e "s/^SENTRY_TSDB = .*$/${tsdb_settings}/g" "$SENTRY_CONFIG_PY" || true + + if grep -xq 'SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"' "$SENTRY_CONFIG_PY"; then + echo "Migrated TSDB to Snuba. 
Old configuration file backed up to $SENTRY_CONFIG_PY.bak" + return + fi + + echo "Failed to automatically migrate TSDB. Reverting..." + mv "$SENTRY_CONFIG_PY.bak" "$SENTRY_CONFIG_PY" + echo "$SENTRY_CONFIG_PY restored from backup." + fi + + echo "WARN: Your Sentry configuration uses a legacy data store for time-series data. Remove the options SENTRY_TSDB and SENTRY_TSDB_OPTIONS from $SENTRY_CONFIG_PY and add:" + echo "" + echo "$tsdb_settings" + echo "" + echo "For more information please refer to https://github.com/getsentry/onpremise/pull/430" + fi +} + +replace_tsdb + +echo "${_endgroup}" diff --git a/install/set-up-and-migrate-database.sh b/install/set-up-and-migrate-database.sh new file mode 100644 index 0000000000..38d4093b36 --- /dev/null +++ b/install/set-up-and-migrate-database.sh @@ -0,0 +1,15 @@ +echo "${_group}Setting up / migrating database ..." + +if [[ -n "${CI:-}" || "${SKIP_USER_PROMPT:-0}" == 1 ]]; then + $dcr web upgrade --noinput + echo "" + echo "Did not prompt for user creation due to non-interactive shell." + echo "Run the following command to create one yourself (recommended):" + echo "" + echo " docker-compose run --rm web createuser" + echo "" +else + $dcr web upgrade +fi + +echo "${_endgroup}" diff --git a/install/set-up-zookeeper.sh b/install/set-up-zookeeper.sh new file mode 100644 index 0000000000..a4a56fec47 --- /dev/null +++ b/install/set-up-zookeeper.sh @@ -0,0 +1,16 @@ +echo "${_group}Setting up Zookeeper ..." 
+ +ZOOKEEPER_SNAPSHOT_FOLDER_EXISTS=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/data/version-2 | wc -l | tr -d '[:space:]'') +if [[ "$ZOOKEEPER_SNAPSHOT_FOLDER_EXISTS" -eq 1 ]]; then + ZOOKEEPER_LOG_FILE_COUNT=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/log/version-2/* | wc -l | tr -d '[:space:]'') + ZOOKEEPER_SNAPSHOT_FILE_COUNT=$($dcr zookeeper bash -c 'ls 2>/dev/null -Ubad1 -- /var/lib/zookeeper/data/version-2/* | wc -l | tr -d '[:space:]'') + # This is a workaround for a ZK upgrade bug: https://issues.apache.org/jira/browse/ZOOKEEPER-3056 + cd .. + if [[ "$ZOOKEEPER_LOG_FILE_COUNT" -gt 0 ]] && [[ "$ZOOKEEPER_SNAPSHOT_FILE_COUNT" -eq 0 ]]; then + $dcr -v $(pwd)/zookeeper:/temp zookeeper bash -c 'cp /temp/snapshot.0 /var/lib/zookeeper/data/version-2/snapshot.0' + $dc run -d -e ZOOKEEPER_SNAPSHOT_TRUST_EMPTY=true zookeeper + fi + cd install +fi + +echo "${_endgroup}" diff --git a/install/turn-things-off.sh b/install/turn-things-off.sh new file mode 100644 index 0000000000..090dc8d396 --- /dev/null +++ b/install/turn-things-off.sh @@ -0,0 +1,14 @@ +echo "${_group}Turning things off ..." + +if [[ -n "$MINIMIZE_DOWNTIME" ]]; then + # Stop everything but relay and nginx + $dc rm -fsv $($dc config --services | grep -v -E '^(nginx|relay)$') +else + # Clean up old stuff and ensure nothing is working while we install/update + # This is for older versions of on-premise: + $dc -p onpremise down -t $STOP_TIMEOUT --rmi local --remove-orphans + # This is for newer versions + $dc down -t $STOP_TIMEOUT --rmi local --remove-orphans +fi + +echo "${_endgroup}" diff --git a/install/update-docker-images.sh b/install/update-docker-images.sh new file mode 100644 index 0000000000..e6d232cac7 --- /dev/null +++ b/install/update-docker-images.sh @@ -0,0 +1,13 @@ +echo "${_group}Fetching and updating Docker images ..." + +# We tag locally built images with an '-onpremise-local' suffix. 
docker-compose +# pull tries to pull these too and shows a 404 error on the console which is +# confusing and unnecessary. To overcome this, we add the stderr>stdout +# redirection below and pass it through grep, ignoring all lines having this +# '-onpremise-local' suffix. +$dc pull -q --ignore-pull-failures 2>&1 | grep -v -- -onpremise-local || true + +# We may not have the set image on the repo (local images) so allow fails +docker pull ${SENTRY_IMAGE} || true; + +echo "${_endgroup}" diff --git a/install/upgrade-postgres.sh b/install/upgrade-postgres.sh new file mode 100644 index 0000000000..1faccb82a1 --- /dev/null +++ b/install/upgrade-postgres.sh @@ -0,0 +1,24 @@ +echo "${_group}Ensuring proper PostgreSQL version ..." + +# Very naively check whether there's an existing sentry-postgres volume and the PG version in it +if [[ -n "$(docker volume ls -q --filter name=sentry-postgres)" && "$(docker run --rm -v sentry-postgres:/db busybox cat /db/PG_VERSION 2>/dev/null)" == "9.5" ]]; then + docker volume rm sentry-postgres-new || true + # If this is Postgres 9.5 data, start upgrading it to 9.6 in a new volume + docker run --rm \ + -v sentry-postgres:/var/lib/postgresql/9.5/data \ + -v sentry-postgres-new:/var/lib/postgresql/9.6/data \ + tianon/postgres-upgrade:9.5-to-9.6 + + # Get rid of the old volume as we'll rename the new one to that + docker volume rm sentry-postgres + docker volume create --name sentry-postgres + # There's no rename volume in Docker so copy the contents from old to new name + # Also append the `host all all all trust` line as `tianon/postgres-upgrade:9.5-to-9.6` + # doesn't do that automatically. + docker run --rm -v sentry-postgres-new:/from -v sentry-postgres:/to alpine ash -c \ + "cd /from ; cp -av . 
/to ; echo 'host all all all trust' >> /to/pg_hba.conf" + # Finally, remove the new old volume as we are all in sentry-postgres now + docker volume rm sentry-postgres-new +fi + +echo "${_endgroup}" diff --git a/install/wrap-up.sh b/install/wrap-up.sh new file mode 100644 index 0000000000..125e7e0133 --- /dev/null +++ b/install/wrap-up.sh @@ -0,0 +1,25 @@ +if [[ "$MINIMIZE_DOWNTIME" ]]; then + echo "${_group}Waiting for Sentry to start ..." + + # Start the whole setup, except nginx and relay. + $dc up -d --remove-orphans $($dc config --services | grep -v -E '^(nginx|relay)$') + $dc exec -T nginx nginx -s reload + + docker run --rm --network="${COMPOSE_PROJECT_NAME}_default" alpine ash \ + -c 'while [[ "$(wget -T 1 -q -O- http://web:9000/_health/)" != "ok" ]]; do sleep 0.5; done' + + # Make sure everything is up. This should only touch relay and nginx + $dc up -d + + echo "${_endgroup}" +else + echo "" + echo "-----------------------------------------------------------------" + echo "" + echo "You're all done! 
Run the following command to get Sentry running:" + echo "" + echo " $dc_base up -d" + echo "" + echo "-----------------------------------------------------------------" + echo "" +fi diff --git a/nginx/nginx.conf b/nginx/nginx.conf new file mode 100644 index 0000000000..ba03fb989e --- /dev/null +++ b/nginx/nginx.conf @@ -0,0 +1,74 @@ +user nginx; +worker_processes 1; + +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; + + +events { + worker_connections 1024; +} + + +http { + default_type application/octet-stream; + + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; + + access_log /var/log/nginx/access.log main; + + sendfile on; + tcp_nopush on; + tcp_nodelay on; + reset_timedout_connection on; + + keepalive_timeout 75s; + + gzip off; + server_tokens off; + + server_names_hash_bucket_size 64; + types_hash_max_size 2048; + types_hash_bucket_size 64; + client_max_body_size 100m; + + proxy_http_version 1.1; + proxy_redirect off; + proxy_buffering off; + proxy_next_upstream error timeout invalid_header http_502 http_503 non_idempotent; + proxy_next_upstream_tries 2; + + # Remove the Connection header if the client sends it, + # it could be "close" to close a keepalive connection + proxy_set_header Connection ''; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Request-Id $request_id; + proxy_read_timeout 30s; + proxy_send_timeout 5s; + + upstream relay { + server relay:3000; + } + + upstream sentry { + server web:9000; + } + + server { + listen 80; + + location /api/store/ { + proxy_pass http://relay; + } + location ~ ^/api/[1-9]\d*/ { + proxy_pass http://relay; + } + location / { + proxy_pass http://sentry; + } + } +} diff --git a/postgres/init_hba.sh b/postgres/init_hba.sh new file mode 100755 index 0000000000..f4b332abfa --- 
/dev/null +++ b/postgres/init_hba.sh @@ -0,0 +1,7 @@ +#!/bin/bash +# Initializes the pg_hba file with access permissions to the replication +# slots. + +set -e + +{ echo "host replication all all trust"; } >> "$PGDATA/pg_hba.conf" diff --git a/postgres/postgres-entrypoint.sh b/postgres/postgres-entrypoint.sh new file mode 100755 index 0000000000..0b0d98a964 --- /dev/null +++ b/postgres/postgres-entrypoint.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# This script replaces the default docker entrypoint for postgres in the +# development environment. +# Its job is to ensure postgres is properly configured to support the +# Change Data Capture pipeline (by setting access permissions and installing +# the replication plugin we use for CDC). Unfortunately the default +# Postgres image does not allow this level of configurability so we need +# to do it this way in order not to have to publish and maintain our own +# Postgres image. +# +# This then, at the end, transfers control to the default entrypoint. + +set -e + +prep_init_db() { + cp /opt/sentry/init_hba.sh /docker-entrypoint-initdb.d/init_hba.sh +} + +cdc_setup_hba_conf() { + # Ensure pg-hba is properly configured to allow connections + # to the replication slots. + + PG_HBA="$PGDATA/pg_hba.conf" + if [ ! -f "$PG_HBA" ]; then + echo "DB not initialized. Postgres will take care of pg_hba" + elif [ "$(grep -c -E "^host\s+replication" "$PGDATA"/pg_hba.conf)" != 0 ]; then + echo "Replication config already present in pg_hba. Not changing anything." 
+ else + # Execute the same script we run on DB initialization + /opt/sentry/init_hba.sh + fi +} + +bind_wal2json() { + # Copy the file in the right place + cp /opt/sentry/wal2json/wal2json.so `pg_config --pkglibdir`/wal2json.so +} + +echo "Setting up Change Data Capture" + +prep_init_db +if [ "$1" = 'postgres' ]; then + cdc_setup_hba_conf + bind_wal2json +fi +exec /docker-entrypoint.sh "$@" diff --git a/relay/config.example.yml b/relay/config.example.yml new file mode 100644 index 0000000000..52e6630671 --- /dev/null +++ b/relay/config.example.yml @@ -0,0 +1,13 @@ +relay: + upstream: "http://web:9000/" + host: 0.0.0.0 + port: 3000 +logging: + level: WARN +processing: + enabled: true + kafka_config: + - {name: "bootstrap.servers", value: "kafka:9092"} + - {name: "message.max.bytes", value: 50000000} # 50MB + redis: redis://redis:6379 + geoip_path: "/geoip/GeoLite2-City.mmdb" diff --git a/reset.sh b/reset.sh new file mode 100755 index 0000000000..0e4db0e93e --- /dev/null +++ b/reset.sh @@ -0,0 +1,62 @@ +#!/usr/bin/env bash + +# The purpose of this script is to make it easy to reset a local onpremise +# install to a clean state, optionally targeting a particular version. + +set -euo pipefail + +if [ -n "${DEBUG:-}" ]; then + set -x +fi + +cd "$(dirname $0)" + + +function confirm () { + read -p "$1 [y/n] " confirmation + if [ "$confirmation" != "y" ]; then + echo "Canceled. πŸ˜…" + exit + fi +} + + +# If we have a version given, validate it. +# ---------------------------------------- +# Note that arbitrary git refs won't work, because the *_IMAGE variables in +# .env will almost certainly point to :latest. Tagged releases are generally +# the only refs where these component versions are pinned, so enforce that +# we're targeting a valid tag here. Do this early in order to fail fast. + +version="${1:-}" +if [ -n "$version" ]; then + set +e + git rev-parse --verify --quiet "refs/tags/$version" > /dev/null + if [ $? 
-gt 0 ]; then + echo "Bad version: $version" + exit + fi + set -e +fi + +# Make sure they mean it. +confirm "☠️ Warning! 😳 This is highly destructive! 😱 Are you sure you wish to proceed?" +echo "Okay ... good luck! 😰" + +# Hit the reset button. +docker-compose down --volumes --remove-orphans --rmi local + +# Remove any remaining (likely external) volumes with name matching 'sentry-.*'. +for volume in $(docker volume list --format '{{ .Name }}' | grep '^sentry-'); do + docker volume remove $volume > /dev/null \ + && echo "Removed volume: $volume" \ + || echo "Skipped volume: $volume" +done + +# If we have a version given, switch to it. +if [ -n "$version" ]; then + git checkout "$version" +fi + +# Install. +exec ./install.sh diff --git a/scripts/bump-version.sh b/scripts/bump-version.sh new file mode 100755 index 0000000000..b5d4586c42 --- /dev/null +++ b/scripts/bump-version.sh @@ -0,0 +1,21 @@ +#!/bin/bash +set -eu + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $SCRIPT_DIR/.. 
+ +OLD_VERSION="$1" +NEW_VERSION="$2" + +SYMBOLICATOR_VERSION=${SYMBOLICATOR_VERSION:-$(curl -s "https://api.github.com/repos/getsentry/symbolicator/releases/latest" | grep -Po '"tag_name": "\K.*?(?=")')} +WAL2JSON_VERSION=${WAL2JSON_VERSION:-$(curl -s "https://api.github.com/repos/getsentry/wal2json/releases/latest" | grep -Po '"tag_name": "\K.*?(?=")')} + +sed -i -e "s/^SYMBOLICATOR_IMAGE=\([^:]\+\):.\+\$/SYMBOLICATOR_IMAGE=\1:$SYMBOLICATOR_VERSION/" .env +sed -i -e "s/^WAL2JSON_VERSION=\([^:]\+\):.\+\$/WAL2JSON_VERSION=\1:$WAL2JSON_VERSION/" .env +sed -i -e "s/^\(SENTRY\|SNUBA\|RELAY\)_IMAGE=\([^:]\+\):.\+\$/\1_IMAGE=\2:$NEW_VERSION/" .env +sed -i -e "s/^\# Self-Hosted Sentry .*/# Self-Hosted Sentry $NEW_VERSION/" README.md +sed -i -e "s/\(Change Date:\s*\)[-0-9]\+\$/\\1$(date +'%Y-%m-%d' -d '3 years')/" LICENSE + +echo "New version: $NEW_VERSION" +echo "New Symbolicator version: $SYMBOLICATOR_VERSION" +echo "New wal2json version: $WAL2JSON_VERSION" diff --git a/scripts/post-release.sh b/scripts/post-release.sh new file mode 100755 index 0000000000..d0a8f7b210 --- /dev/null +++ b/scripts/post-release.sh @@ -0,0 +1,10 @@ +#!/bin/bash +set -eu + +SCRIPT_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +cd $SCRIPT_DIR/.. + +# Bring master back to nightlies after merge from release branch +git checkout master && git pull +SYMBOLICATOR_VERSION=nightly ./scripts/bump-version.sh '' 'nightly' +git diff --quiet || git commit -anm $'build: Set master version to nightly\n\n#skip-changelog' && git pull --rebase && git push diff --git a/sentry.conf.py b/sentry.conf.py deleted file mode 100644 index e4e0baf184..0000000000 --- a/sentry.conf.py +++ /dev/null @@ -1,306 +0,0 @@ -# This file is just Python, with a touch of Django which means -# you can inherit and tweak settings to your hearts content. 
- -# For Docker, the following environment variables are supported: -# SENTRY_POSTGRES_HOST -# SENTRY_POSTGRES_PORT -# SENTRY_DB_NAME -# SENTRY_DB_USER -# SENTRY_DB_PASSWORD -# SENTRY_RABBITMQ_HOST -# SENTRY_RABBITMQ_USERNAME -# SENTRY_RABBITMQ_PASSWORD -# SENTRY_RABBITMQ_VHOST -# SENTRY_REDIS_HOST -# SENTRY_REDIS_PASSWORD -# SENTRY_REDIS_PORT -# SENTRY_REDIS_DB -# SENTRY_MEMCACHED_HOST -# SENTRY_MEMCACHED_PORT -# SENTRY_FILESTORE_DIR -# SENTRY_SERVER_EMAIL -# SENTRY_EMAIL_HOST -# SENTRY_EMAIL_PORT -# SENTRY_EMAIL_USER -# SENTRY_EMAIL_PASSWORD -# SENTRY_EMAIL_USE_TLS -# SENTRY_ENABLE_EMAIL_REPLIES -# SENTRY_SMTP_HOSTNAME -# SENTRY_MAILGUN_API_KEY -# SENTRY_SINGLE_ORGANIZATION -# SENTRY_SECRET_KEY -# GITHUB_APP_ID -# GITHUB_API_SECRET -# BITBUCKET_CONSUMER_KEY -# BITBUCKET_CONSUMER_SECRET -from sentry.conf.server import * # NOQA - -import os -import os.path - -CONF_ROOT = os.path.dirname(__file__) - -postgres = env('SENTRY_POSTGRES_HOST') or (env('POSTGRES_PORT_5432_TCP_ADDR') and 'postgres') -if postgres: - DATABASES = { - 'default': { - 'ENGINE': 'sentry.db.postgres', - 'NAME': ( - env('SENTRY_DB_NAME') - or env('POSTGRES_ENV_POSTGRES_USER') - or 'postgres' - ), - 'USER': ( - env('SENTRY_DB_USER') - or env('POSTGRES_ENV_POSTGRES_USER') - or 'postgres' - ), - 'PASSWORD': ( - env('SENTRY_DB_PASSWORD') - or env('POSTGRES_ENV_POSTGRES_PASSWORD') - or '' - ), - 'HOST': postgres, - 'PORT': ( - env('SENTRY_POSTGRES_PORT') - or '' - ), - 'OPTIONS': { - 'autocommit': True, - }, - }, - } - -# You should not change this setting after your database has been created -# unless you have altered all schemas first -SENTRY_USE_BIG_INTS = True - -# If you're expecting any kind of real traffic on Sentry, we highly recommend -# configuring the CACHES and Redis settings - -########### -# General # -########### - -# Instruct Sentry that this install intends to be run by a single organization -# and thus various UI optimizations should be enabled. 
-SENTRY_SINGLE_ORGANIZATION = env('SENTRY_SINGLE_ORGANIZATION', True) - -######### -# Redis # -######### - -# Generic Redis configuration used as defaults for various things including: -# Buffers, Quotas, TSDB - -redis = env('SENTRY_REDIS_HOST') or (env('REDIS_PORT_6379_TCP_ADDR') and 'redis') -if not redis: - raise Exception('Error: REDIS_PORT_6379_TCP_ADDR (or SENTRY_REDIS_HOST) is undefined, did you forget to `--link` a redis container?') - -redis_password = env('SENTRY_REDIS_PASSWORD') or '' -redis_port = env('SENTRY_REDIS_PORT') or '6379' -redis_db = env('SENTRY_REDIS_DB') or '0' - -SENTRY_OPTIONS.update({ - 'redis.clusters': { - 'default': { - 'hosts': { - 0: { - 'host': redis, - 'password': redis_password, - 'port': redis_port, - 'db': redis_db, - }, - }, - }, - }, -}) - -######### -# Cache # -######### - -# Sentry currently utilizes two separate mechanisms. While CACHES is not a -# requirement, it will optimize several high throughput patterns. - -memcached = env('SENTRY_MEMCACHED_HOST') or (env('MEMCACHED_PORT_11211_TCP_ADDR') and 'memcached') -if memcached: - memcached_port = ( - env('SENTRY_MEMCACHED_PORT') - or '11211' - ) - CACHES = { - 'default': { - 'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache', - 'LOCATION': [memcached + ':' + memcached_port], - 'TIMEOUT': 3600, - } - } - -# A primary cache is required for things such as processing events -SENTRY_CACHE = 'sentry.cache.redis.RedisCache' - -######### -# Queue # -######### - -# See https://docs.getsentry.com/on-premise/server/queue/ for more -# information on configuring your queue broker and workers. Sentry relies -# on a Python framework called Celery to manage queues. 
- -rabbitmq = env('SENTRY_RABBITMQ_HOST') or (env('RABBITMQ_PORT_5672_TCP_ADDR') and 'rabbitmq') - -if rabbitmq: - BROKER_URL = ( - 'amqp://' + ( - env('SENTRY_RABBITMQ_USERNAME') - or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_USER') - or 'guest' - ) + ':' + ( - env('SENTRY_RABBITMQ_PASSWORD') - or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_PASS') - or 'guest' - ) + '@' + rabbitmq + '/' + ( - env('SENTRY_RABBITMQ_VHOST') - or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_VHOST') - or '/' - ) - ) -else: - BROKER_URL = 'redis://:' + redis_password + '@' + redis + ':' + redis_port + '/' + redis_db - - -############### -# Rate Limits # -############### - -# Rate limits apply to notification handlers and are enforced per-project -# automatically. - -SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter' - -################## -# Update Buffers # -################## - -# Buffers (combined with queueing) act as an intermediate layer between the -# database and the storage API. They will greatly improve efficiency on large -# numbers of the same events being sent to the API in a short amount of time. -# (read: if you send any kind of real data to Sentry, you should enable buffers) - -SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer' - -########## -# Quotas # -########## - -# Quotas allow you to rate limit individual projects or the Sentry install as -# a whole. - -SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota' - -######## -# TSDB # -######## - -# The TSDB is used for building charts as well as making things like per-rate -# alerts possible. - -SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB' - -########### -# Digests # -########### - -# The digest backend powers notification summaries. - -SENTRY_DIGESTS = 'sentry.digests.backends.redis.RedisBackend' - -################ -# File storage # -################ - -# Uploaded media uses these `filestore` settings. The available -# backends are either `filesystem` or `s3`. 
- -SENTRY_OPTIONS['filestore.backend'] = 'filesystem' -SENTRY_OPTIONS['filestore.options'] = { - 'location': env('SENTRY_FILESTORE_DIR'), -} - -############## -# Web Server # -############## - -# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto -# header and set `SENTRY_USE_SSL=1` - -if env('SENTRY_USE_SSL', False): - SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') - SESSION_COOKIE_SECURE = True - CSRF_COOKIE_SECURE = True - SOCIAL_AUTH_REDIRECT_IS_HTTPS = True - -SENTRY_WEB_HOST = '0.0.0.0' -SENTRY_WEB_PORT = 9000 -SENTRY_WEB_OPTIONS = { - # 'workers': 3, # the number of web workers -} - -############### -# Mail Server # -############### - - -email = env('SENTRY_EMAIL_HOST') or (env('SMTP_PORT_25_TCP_ADDR') and 'smtp') -if email: - SENTRY_OPTIONS['mail.backend'] = 'smtp' - SENTRY_OPTIONS['mail.host'] = email - SENTRY_OPTIONS['mail.password'] = env('SENTRY_EMAIL_PASSWORD') or '' - SENTRY_OPTIONS['mail.username'] = env('SENTRY_EMAIL_USER') or '' - SENTRY_OPTIONS['mail.port'] = int(env('SENTRY_EMAIL_PORT') or 25) - SENTRY_OPTIONS['mail.use-tls'] = env('SENTRY_EMAIL_USE_TLS', False) -else: - SENTRY_OPTIONS['mail.backend'] = 'dummy' - -# The email address to send on behalf of -SENTRY_OPTIONS['mail.from'] = env('SENTRY_SERVER_EMAIL') or 'root@localhost' - -# If you're using mailgun for inbound mail, set your API key and configure a -# route to forward to /api/hooks/mailgun/inbound/ -SENTRY_OPTIONS['mail.mailgun-api-key'] = env('SENTRY_MAILGUN_API_KEY') or '' - -# If you specify a MAILGUN_API_KEY, you definitely want EMAIL_REPLIES -if SENTRY_OPTIONS['mail.mailgun-api-key']: - SENTRY_OPTIONS['mail.enable-replies'] = True -else: - SENTRY_OPTIONS['mail.enable-replies'] = env('SENTRY_ENABLE_EMAIL_REPLIES', False) - -if SENTRY_OPTIONS['mail.enable-replies']: - SENTRY_OPTIONS['mail.reply-hostname'] = env('SENTRY_SMTP_HOSTNAME') or '' - -# If this value ever becomes compromised, it's important to regenerate your -# SENTRY_SECRET_KEY. 
Changing this value will result in all current sessions -# being invalidated. -secret_key = env('SENTRY_SECRET_KEY') -if not secret_key: - raise Exception('Error: SENTRY_SECRET_KEY is undefined, run `generate-secret-key` and set to -e SENTRY_SECRET_KEY') - -if 'SENTRY_RUNNING_UWSGI' not in os.environ and len(secret_key) < 32: - print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') - print('!! CAUTION !!') - print('!! Your SENTRY_SECRET_KEY is potentially insecure. !!') - print('!! We recommend at least 32 characters long. !!') - print('!! Regenerate with `generate-secret-key`. !!') - print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!') - -SENTRY_OPTIONS['system.secret-key'] = secret_key - -if 'GITHUB_APP_ID' in os.environ: - GITHUB_EXTENDED_PERMISSIONS = ['repo'] - GITHUB_APP_ID = env('GITHUB_APP_ID') - GITHUB_API_SECRET = env('GITHUB_API_SECRET') - -if 'BITBUCKET_CONSUMER_KEY' in os.environ: - BITBUCKET_CONSUMER_KEY = env('BITBUCKET_CONSUMER_KEY') - BITBUCKET_CONSUMER_SECRET = env('BITBUCKET_CONSUMER_SECRET') diff --git a/sentry/config.example.yml b/sentry/config.example.yml new file mode 100644 index 0000000000..22a236ab1f --- /dev/null +++ b/sentry/config.example.yml @@ -0,0 +1,116 @@ +# While a lot of configuration in Sentry can be changed via the UI, for all +# new-style config (as of 8.0) you can also declare values here in this file +# to enforce defaults or to ensure they cannot be changed via the UI. For more +# information see the Sentry documentation. + +############### +# Mail Server # +############### + +# mail.backend: 'smtp' # Use dummy if you want to disable email entirely +mail.host: 'smtp' +# mail.port: 25 +# mail.username: '' +# mail.password: '' +# mail.use-tls: false +# mail.use-ssl: false + +# NOTE: The following 2 configs (mail.from and mail.list-namespace) are set +# through SENTRY_MAIL_HOST in sentry.conf.py so remove those first if +# you want your values in this file to be effective! 
+ + +# The email address to send on behalf of +# mail.from: 'root@localhost' + +# The mailing list namespace for emails sent by this Sentry server. +# This should be a domain you own (often the same domain as the domain +# part of the `mail.from` configuration parameter value) or `localhost`. +# mail.list-namespace: 'localhost' + +# If you'd like to configure email replies, enable this. +# mail.enable-replies: true + +# When email-replies are enabled, this value is used in the Reply-To header +# mail.reply-hostname: '' + +# If you're using mailgun for inbound mail, set your API key and configure a +# route to forward to /api/hooks/mailgun/inbound/ +# Also don't forget to set `mail.enable-replies: true` above. +# mail.mailgun-api-key: '' + +################### +# System Settings # +################### + +# If this file ever becomes compromised, it's important to generate a new key. +# Changing this value will result in all current sessions being invalidated. +# A new key can be generated with `$ sentry config generate-secret-key` +system.secret-key: '!!changeme!!' + +# The ``redis.clusters`` setting is used, unsurprisingly, to configure Redis +# clusters. These clusters can be then referred to by name when configuring +# backends such as the cache, digests, or TSDB backend. +# redis.clusters: +# default: +# hosts: +# 0: +# host: 127.0.0.1 +# port: 6379 + +################ +# File storage # +################ + +# Uploaded media uses these `filestore` settings. The available +# backends are either `filesystem` or `s3`. 
+ +filestore.backend: 'filesystem' +filestore.options: + location: '/data/files' +dsym.cache-path: '/data/dsym-cache' +releasefile.cache-path: '/data/releasefile-cache' + +# filestore.backend: 's3' +# filestore.options: +# access_key: 'AKIXXXXXX' +# secret_key: 'XXXXXXX' +# bucket_name: 's3-bucket-name' + +system.internal-url-prefix: 'http://web:9000' +symbolicator.enabled: true +symbolicator.options: + url: "http://symbolicator:3021" + +transaction-events.force-disable-internal-project: true + +###################### +# GitHub Integration # +###################### + +# github-login.extended-permissions: ['repo'] +# github-app.id: GITHUB_APP_ID +# github-app.name: 'GITHUB_APP_NAME' +# github-app.webhook-secret: 'GITHUB_WEBHOOK_SECRET' # Use only if configured in GitHub +# github-app.client-id: 'GITHUB_CLIENT_ID' +# github-app.client-secret: 'GITHUB_CLIENT_SECRET' +# github-app.private-key: | +# -----BEGIN RSA PRIVATE KEY----- +# privatekeyprivatekeyprivatekeyprivatekey +# privatekeyprivatekeyprivatekeyprivatekey +# privatekeyprivatekeyprivatekeyprivatekey +# privatekeyprivatekeyprivatekeyprivatekey +# privatekeyprivatekeyprivatekeyprivatekey +# -----END RSA PRIVATE KEY----- + +##################### +# Slack Integration # +##################### + +# Refer to https://develop.sentry.dev/integrations/slack/ for setup instructions. + +# slack.client-id: <'client id'> +# slack.client-secret: +# slack.signing-secret: +## If legacy-app is True use verification-token instead of signing-secret +# slack.verification-token: diff --git a/sentry/entrypoint.sh b/sentry/entrypoint.sh new file mode 100755 index 0000000000..2f2614a798 --- /dev/null +++ b/sentry/entrypoint.sh @@ -0,0 +1,20 @@ +#!/bin/bash +set -e + +if [ "$(ls -A /usr/local/share/ca-certificates/)" ]; then + update-ca-certificates +fi + +req_file="/etc/sentry/requirements.txt" +plugins_dir="/data/custom-packages" +checksum_file="$plugins_dir/.checksum" + +if [[ -s "$req_file" ]] && !
cat "$req_file" | grep '^[^#[:space:]]' | shasum -s -c "$checksum_file" 2>/dev/null; then + echo "Installing additional dependencies..." + mkdir -p "$plugins_dir" + pip install --user -r "$req_file" + cat "$req_file" | grep '^[^#[:space:]]' | shasum > "$checksum_file" + echo "" +fi + +source /docker-entrypoint.sh diff --git a/requirements.txt b/sentry/requirements.example.txt similarity index 100% rename from requirements.txt rename to sentry/requirements.example.txt diff --git a/sentry/sentry.conf.example.py b/sentry/sentry.conf.example.py new file mode 100644 index 0000000000..355ae63363 --- /dev/null +++ b/sentry/sentry.conf.example.py @@ -0,0 +1,281 @@ +# This file is just Python, with a touch of Django which means +# you can inherit and tweak settings to your hearts content. + +from sentry.conf.server import * # NOQA + + +# Generously adapted from pynetlinux: https://git.io/JJmga +def get_internal_network(): + import ctypes + import fcntl + import math + import socket + import struct + + iface = b"eth0" + sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + ifreq = struct.pack(b"16sH14s", iface, socket.AF_INET, b"\x00" * 14) + + try: + ip = struct.unpack( + b"!I", struct.unpack(b"16sH2x4s8x", fcntl.ioctl(sockfd, 0x8915, ifreq))[2] + )[0] + netmask = socket.ntohl( + struct.unpack(b"16sH2xI8x", fcntl.ioctl(sockfd, 0x891B, ifreq))[2] + ) + except IOError: + return () + base = socket.inet_ntoa(struct.pack(b"!I", ip & netmask)) + netmask_bits = 32 - int(round(math.log(ctypes.c_uint32(~netmask).value + 1, 2), 1)) + return "{0:s}/{1:d}".format(base, netmask_bits) + + +INTERNAL_SYSTEM_IPS = (get_internal_network(),) + + +DATABASES = { + "default": { + "ENGINE": "sentry.db.postgres", + "NAME": "postgres", + "USER": "postgres", + "PASSWORD": "", + "HOST": "postgres", + "PORT": "", + } +} + +# You should not change this setting after your database has been created +# unless you have altered all schemas first +SENTRY_USE_BIG_INTS = True + +# If you're expecting any 
kind of real traffic on Sentry, we highly recommend +# configuring the CACHES and Redis settings + +########### +# General # +########### + +# Instruct Sentry that this install intends to be run by a single organization +# and thus various UI optimizations should be enabled. +SENTRY_SINGLE_ORGANIZATION = True + +SENTRY_OPTIONS["system.event-retention-days"] = int( + env("SENTRY_EVENT_RETENTION_DAYS", "90") +) + +######### +# Redis # +######### + +# Generic Redis configuration used as defaults for various things including: +# Buffers, Quotas, TSDB + +SENTRY_OPTIONS["redis.clusters"] = { + "default": { + "hosts": {0: {"host": "redis", "password": "", "port": "6379", "db": "0"}} + } +} + +######### +# Queue # +######### + +# See https://develop.sentry.dev/services/queue/ for more +# information on configuring your queue broker and workers. Sentry relies +# on a Python framework called Celery to manage queues. + +rabbitmq_host = None +if rabbitmq_host: + BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format( + username="guest", password="guest", host=rabbitmq_host, vhost="/" + ) +else: + BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format( + **SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0] + ) + + +######### +# Cache # +######### + +# Sentry currently utilizes two separate mechanisms. While CACHES is not a +# requirement, it will optimize several high throughput patterns. 
+ +CACHES = { + "default": { + "BACKEND": "django.core.cache.backends.memcached.MemcachedCache", + "LOCATION": ["memcached:11211"], + "TIMEOUT": 3600, + } +} + +# A primary cache is required for things such as processing events +SENTRY_CACHE = "sentry.cache.redis.RedisCache" + +DEFAULT_KAFKA_OPTIONS = { + "bootstrap.servers": "kafka:9092", + "message.max.bytes": 50000000, + "socket.timeout.ms": 1000, +} + +SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream" +SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS} + +KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS + +############### +# Rate Limits # +############### + +# Rate limits apply to notification handlers and are enforced per-project +# automatically. + +SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter" + +################## +# Update Buffers # +################## + +# Buffers (combined with queueing) act as an intermediate layer between the +# database and the storage API. They will greatly improve efficiency on large +# numbers of the same events being sent to the API in a short amount of time. +# (read: if you send any kind of real data to Sentry, you should enable buffers) + +SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer" + +########## +# Quotas # +########## + +# Quotas allow you to rate limit individual projects or the Sentry install as +# a whole. + +SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota" + +######## +# TSDB # +######## + +# The TSDB is used for building charts as well as making things like per-rate +# alerts possible. + +SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB" + +######### +# SNUBA # +######### + +SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend" +SENTRY_SEARCH_OPTIONS = {} +SENTRY_TAGSTORE_OPTIONS = {} + +########### +# Digests # +########### + +# The digest backend powers notification summaries. 
+ +SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend" + +############## +# Web Server # +############## + +SENTRY_WEB_HOST = "0.0.0.0" +SENTRY_WEB_PORT = 9000 +SENTRY_WEB_OPTIONS = { + "http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT), + "protocol": "uwsgi", + # This is needed in order to prevent https://git.io/fj7Lw + "uwsgi-socket": None, + "so-keepalive": True, + # Keep this between 15s-75s as that's what Relay supports + "http-keepalive": 15, + "http-chunked-input": True, + # the number of web workers + "workers": 3, + "threads": 4, + "memory-report": False, + # Some stuff so uwsgi will cycle workers sensibly + "max-requests": 100000, + "max-requests-delta": 500, + "max-worker-lifetime": 86400, + # Duplicate options from sentry default just so we don't get + # bit by sentry changing a default value that we depend on. + "thunder-lock": True, + "log-x-forwarded-for": False, + "buffer-size": 32768, + "limit-post": 209715200, + "disable-logging": True, + "reload-on-rss": 600, + "ignore-sigpipe": True, + "ignore-write-errors": True, + "disable-write-exception": True, +} + +########### +# SSL/TLS # +########### + +# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto +# header and enable the settings below + +# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https') +# SESSION_COOKIE_SECURE = True +# CSRF_COOKIE_SECURE = True +# SOCIAL_AUTH_REDIRECT_IS_HTTPS = True + +# End of SSL/TLS settings + +######## +# Mail # +######## + +SENTRY_OPTIONS["mail.list-namespace"] = env('SENTRY_MAIL_HOST', 'localhost') +SENTRY_OPTIONS["mail.from"] = f"sentry@{SENTRY_OPTIONS['mail.list-namespace']}" + +############ +# Features # +############ + +SENTRY_FEATURES["projects:sample-events"] = False +SENTRY_FEATURES.update( + { + feature: True + for feature in ( + "organizations:discover", + "organizations:events", + "organizations:global-views", + "organizations:incidents", + "organizations:integrations-issue-basic", + 
"organizations:integrations-issue-sync", + "organizations:invite-members", + "organizations:metric-alert-builder-aggregate", + "organizations:sso-basic", + "organizations:sso-rippling", + "organizations:sso-saml2", + "organizations:performance-view", + "organizations:advanced-search", + "projects:custom-inbound-filters", + "projects:data-forwarding", + "projects:discard-groups", + "projects:plugins", + "projects:rate-limits", + "projects:servicehooks", + ) + } +) + +####################### +# MaxMind Integration # +####################### + +GEOIP_PATH_MMDB = '/geoip/GeoLite2-City.mmdb' + +######################### +# Bitbucket Integration # +######################### + +# BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY' +# BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET' diff --git a/symbolicator/config.example.yml b/symbolicator/config.example.yml new file mode 100644 index 0000000000..62cf9b83b7 --- /dev/null +++ b/symbolicator/config.example.yml @@ -0,0 +1,8 @@ +# See: https://getsentry.github.io/symbolicator/#configuration +cache_dir: "/data" +bind: "0.0.0.0:3021" +logging: + level: "warn" +metrics: + statsd: null +sentry_dsn: null # TODO: Automatically fill this with the internal project DSN diff --git a/test.sh b/test.sh new file mode 100755 index 0000000000..85945dc3c7 --- /dev/null +++ b/test.sh @@ -0,0 +1,4 @@ +#!/usr/bin/env bash +set -e + +./_integration-test/run.sh \ No newline at end of file diff --git a/zookeeper/snapshot.0 b/zookeeper/snapshot.0 new file mode 100644 index 0000000000..3e6deee02b Binary files /dev/null and b/zookeeper/snapshot.0 differ