diff --git a/.travis.yml b/.travis.yml index 898c263..729b4d3 100644 --- a/.travis.yml +++ b/.travis.yml @@ -12,7 +12,6 @@ services: - mysql go: - - "1.10" - "1.11" - "1.12" - "1.13" diff --git a/DOCKER.md b/DOCKER.md deleted file mode 100644 index 1dfc989..0000000 --- a/DOCKER.md +++ /dev/null @@ -1,47 +0,0 @@ -# migrator docker - -To run migrator docker you need to: - -* pull `lukasz/migrator` from docker cloud -* mount a volume with migrations under `/data` -* (optional) specify location of migrator configuration file via environmental variable `MIGRATOR_YAML`, defaults to `/data/migrator.yaml` - -To run migrator as a service: - -```bash -docker run -p 8080:8080 -v /Users/lukasz/migrator-test:/data -e MIGRATOR_YAML=/data/m.yaml -d --link migrator-postgres lukasz/migrator -Starting migrator using config file: /data/m.yaml -2016/08/04 06:24:58 Read config file ==> OK -2016/08/04 06:24:58 Migrator web server starting on port 8080... -``` - -To run migrator in interactive terminal mode: - -```bash -docker run -it -v /Users/lukasz/migrator-test:/data --entrypoint sh --link migrator-postgres lukasz/migrator -``` - -# History and releases - -Here is a short history of migrator docker images: - -1. initial release in 2016 - migrator on debian:jessie - 603MB -2. v1.0 - migrator v1.0 on golang:1.11.2-alpine3.8 - 346MB -3. v1.0-mini - migrator v1.0 multi-stage build with final image on alpine:3.8 - 13.4MB -4. v2.0 - migrator v2.0 - 14.8MB - -Starting with v2.0 all migrator images by default use multi-stage builds. For migrator v1.0 you have to explicitly use `v1.0-mini` tag in order to enjoy an ultra lightweight migrator image. Still, I recommend using latest and greatest. - -Finally, starting with v2.0 migrator-docker project was merged into migrator main project. New version of docker image is built automatically every time a new release is created. 
- -To view all available docker containers see [lukasz/migrator/tags](https://cloud.docker.com/repository/docker/lukasz/migrator/tags). - -# License - -Copyright 2016-2018 Łukasz Budnik - -Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at - -http://www.apache.org/licenses/LICENSE-2.0 - -Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. diff --git a/Dockerfile b/Dockerfile index e676369..6edff49 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,4 +1,4 @@ -FROM golang:1.11.2-alpine3.8 as builder +FROM golang:1.13.5-alpine3.10 as builder MAINTAINER Łukasz Budnik lukasz.budnik@gmail.com @@ -14,7 +14,7 @@ RUN cd /go/src/github.com/lukaszbudnik/migrator && \ GIT_COMMIT_SHA=$(git rev-list -1 HEAD) && \ go build -ldflags "-X main.GitCommitDate=$GIT_COMMIT_DATE -X main.GitCommitSha=$GIT_COMMIT_SHA -X main.GitBranch=$GIT_BRANCH" -FROM alpine:3.8 +FROM alpine:3.10 COPY --from=builder /go/src/github.com/lukaszbudnik/migrator/migrator /bin VOLUME ["/data"] diff --git a/README.md b/README.md index d86f462..2c2ae3a 100644 --- a/README.md +++ b/README.md @@ -1,137 +1,355 @@ # migrator [![Build Status](https://travis-ci.org/lukaszbudnik/migrator.svg?branch=master)](https://travis-ci.org/lukaszbudnik/migrator) [![Go Report Card](https://goreportcard.com/badge/github.com/lukaszbudnik/migrator)](https://goreportcard.com/report/github.com/lukaszbudnik/migrator) [![codecov](https://codecov.io/gh/lukaszbudnik/migrator/branch/master/graph/badge.svg)](https://codecov.io/gh/lukaszbudnik/migrator) -Super fast and lightweight DB migration & evolution tool written in go. 
- -migrator manages all the DB changes for you and completely eliminates manual and error-prone administrative tasks. migrator not only supports single schemas, but also comes with a multi-tenant support. - -migrator run as a HTTP REST service. - -Further, there is an official docker image available on docker hub. migrator docker image is ultra lightweight and has a size of 15MB. Ideal for micro-services deployments! - -To find out more about migrator docker container see [DOCKER.md](DOCKER.md) for more details. +Super fast and lightweight DB migration tool written in go. + +migrator manages and versions all the DB changes for you and completely eliminates manual and error-prone administrative tasks. migrator not only supports single schemas, but also comes with a multi-tenant support. + +migrator runs as a HTTP REST service and can be easily integrated into your continuous integration and continuous delivery pipeline. + +Further, there is an official docker image available on docker hub. [lukasz/migrator](https://hub.docker.com/r/lukasz/migrator) is ultra lightweight and has a size of 15MB. Ideal for micro-services deployments! + +# Table of contents + +* [Usage](#usage) + * [GET /v1/config](#get-v1config) + * [GET /v1/migrations/source](#get-v1migrationssource) + * [GET /v1/migrations/applied](#get-v1migrationsapplied) + * [POST /v1/migrations](#post-v1migrations) + * [GET /v1/tenants](#get-v1tenants) + * [POST /v1/tenants](#post-v1tenants) + * [Request tracing](#request-tracing) +* [Quick Start Guide](#quick-start-guide) + * [1. Get the migrator project](#1-get-the-migrator-project) + * [2. Setup test DB container](#2-setup-test-db-container) + * [3. Build and run migrator](#3-build-and-run-migrator) + * [4. Run migrator from official docker image](#4-run-migrator-from-official-docker-image) + * [5. 
Play around with migrator](#5-play-around-with-migrator) +* [Configuration](#configuration) +* [Customisation and legacy frameworks support](#customisation-and-legacy-frameworks-support) + * [Custom tenants support](#custom-tenants-support) + * [Custom schema placeholder](#custom-schema-placeholder) + * [Synchronising legacy migrations to migrator](#synchonising-legacy-migrations-to-migrator) + * [Final comments](#final-comments) +* [Supported databases](#supported-databases) +* [Performance](#performance) +* [Change log](#change-log) +* [Contributing, code style, running unit & integration tests](#contributing-code-style-running-unit--integration-tests) +* [License](#license) # Usage -migrator exposes a simple REST API which you can use to invoke different actions: +migrator exposes a simple REST API described below. -* GET /config - returns migrator config (`application/x-yaml`) -* GET /diskMigrations - returns disk migrations (`application/json`) -* GET /tenants - returns tenants (`application/json`) -* POST /tenants - adds new tenant, name parameter is passed as JSON parameter, returns applied migrations (`application/json`) -* GET /migrations - returns all applied migrations (`application/json`) -* POST /migrations - applies migrations, no parameters required, returns applied migrations (`application/json`) +## GET /v1/config -Some curl examples to get you started: +Returns migrator's config as `application/x-yaml`. + +Sample request: ``` -curl -v http://localhost:8080/config -curl -v http://localhost:8080/diskMigrations -curl -v http://localhost:8080/tenants -curl -v http://localhost:8080/migrations -curl -v -X POST http://localhost:8080/migrations -curl -v -X POST -H "Content-Type: application/json" -d '{"name": "new_tenant"}' http://localhost:8080/tenants +curl -v http://localhost:8080/v1/config ``` -Port is configurable in `migrator.yaml` and defaults to 8080. Should you need HTTPS capabilities I encourage you to use nginx/apache/haproxy for TLS offloading. 
- -# Versions - -Please navigate to https://github.com/lukaszbudnik/migrator/releases for a complete list of versions, features, and changes. +Sample HTTP response: -# Configuration - -migrator requires a simple `migrator.yaml` file: +``` +< HTTP/1.1 200 OK +< Content-Type: application/x-yaml; charset=utf-8 +< Date: Wed, 01 Jan 2020 17:31:57 GMT +< Content-Length: 277 -```yaml -# required, base directory where all migrations are stored, see singleSchemas and tenantSchemas below baseDir: test/migrations -# required, SQL go driver implementation used, see section "Supported databases" driver: postgres -# required, dataSource format is specific to SQL go driver implementation used, see section "Supported databases" -dataSource: "user=postgres dbname=migrator_test host=192.168.99.100 port=55432 sslmode=disable" -# optional, override only if you have a specific way of determining tenants, default is: -tenantSelectSQL: "select name from migrator.migrator_tenants" -# optional, override only if you have a specific way of creating tenants, default is: -tenantInsertSQL: "insert into migrator.migrator_tenants (name) values ($1)" -# optional, override only if you have a specific schema placeholder, default is: -schemaPlaceHolder: {schema} -# required, directories of single schema SQL migrations, these are subdirectories of baseDir +dataSource: user=postgres dbname=migrator_test host=127.0.0.1 port=32776 sslmode=disable + connect_timeout=1 singleMigrations: - - public - - ref - - config -# optional, directories of tenant schemas SQL migrations, these are subdirectories of baseDir +- ref +- config tenantMigrations: - - tenants -# optional, directories of single SQL scripts which are applied always, these are subdirectories of baseDir +- tenants singleScripts: - - config-scripts -# optional, directories of tenant SQL script which are applied always for all tenants, these are subdirectories of baseDir +- config-scripts tenantScripts: - - tenants-scripts -# optional, default is: 
-port: 8080 -# the webhook configuration section is optional -# URL and template are required if at least one of them is empty noop notifier is used -# the default content type header sent is application/json (can be overridden via webHookHeaders below) -webHookURL: https://hooks.slack.com/services/TTT/BBB/XXX -# the {text} placeholder is replaced by migrator with information about executed migrations or added new tenant -webHookTemplate: "{\"text\": \"{text}\",\"icon_emoji\": \":white_check_mark:\"}" -# should you need more control over HTTP headers use below -webHookHeaders: - - "Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l" - - "Content-Type: application/json" - - "X-CustomHeader: value1,value2" +- tenants-scripts ``` +## GET /v1/migrations/source -migrator supports env variables substitution in config file. All patterns matching `${NAME}` will look for env variable `NAME`. Below are some common use cases: +Returns list of all source migrations. Response is a list of JSON representation of `Migration` struct. -```yaml -dataSource: "user=${DB_USER} password=${DB_PASSWORD} dbname=${DB_NAME} host=${DB_HOST} port=${DB_PORT}" -webHookHeaders: - - "X-Security-Token: ${SECURITY_TOKEN}" +Sample request: + +``` +curl -v http://localhost:8080/v1/migrations/source ``` -# migrator under the hood +Sample HTTP response: -migrator scans all directories under `baseDir` directory. Migrations listed under `singleSchemas` directories will be applied once. Migrations listed under `tenantSchemas` directories will be applied for all tenants fetched using `tenantSelectSQL`. 
+``` +< HTTP/1.1 200 OK +< Content-Type: application/json; charset=utf-8 +< Date: Tue, 31 Dec 2019 11:27:48 GMT +< Transfer-Encoding: chunked + +[ + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config/201602160002.sql", + "migrationType": 1, + "contents": "create table {schema}.config (\n id integer,\n k varchar(100),\n v varchar(100),\n primary key (id)\n);\n", + "checkSum": "58db38d8f6c197ab290212470a82fe1f5b1f3cacadbe00ac59cd68a3bfa98baf" + }, + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants/201602160002.sql", + "migrationType": 2, + "contents": "create table {schema}.module (id integer, id_config integer, foreign key (id_config) references config.config(id));\n", + "checkSum": "56c4c1d8f82f3dedade5116be46267edee01a4889c6359ef03c39dc73ca653a8" + } +] +``` -SQL migrations in both `singleSchemas` and `tenantsSchemas` can use `{schema}` placeholder which will be automatically replaced by migrator with a current schema. 
For example: +`Migration` JSON contains the following fields: -```sql -create schema if not exists {schema}; -create table if not exists {schema}.modules ( k int, v text ); -insert into {schema}.modules values ( 123, '123' ); +* `name` - migration file name +* `sourceDir` - absolute path to source directory +* `file` - absolute path to migration file (concatenation of `sourceDir` and `name`) +* `migrationType` - type of migration, values are: + * 1 - single migration applied once for a given schema + * 2 - multi-tenant migration applied once but for all tenants/schemas + * 3 - single script - special type of migration applied always for a given schema + * 4 - multi-tenant script - special type of migration applied always for all tenants/schemas +* `contents` - contents of the migration file +* `checkSum` - sha256 checksum of migration file contents + +## GET /v1/migrations/applied + +Returns list of all applied migrations. Response is a list of JSON representation of `MigrationDB` struct. 
+ +Sample request: + +``` +curl -v http://localhost:8080/v1/migrations/applied ``` -When using migrator please remember about these: +Sample HTTP response: -* migrator creates `migrator` schema (where `migrator_migrations` and `migrator_tenants` tables reside) automatically -* when adding a new tenant migrator creates a new schema automatically -* single schemas are not created automatically, for this you must add initial migration with `create schema` SQL statement (see example above) +``` +< HTTP/1.1 200 OK +< Content-Type: application/json; charset=utf-8 +< Date: Wed, 01 Jan 2020 17:32:49 GMT +< Transfer-Encoding: chunked + +[ + { + "name": "201602160001.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config/201602160001.sql", + "migrationType": 1, + "contents": "create schema config;\n", + "checkSum": "c1380af7a054ec75778252f539e1e9f914d2c5b1f441ea1df18c2140c6c3380a", + "schema": "config", + "appliedAt": "2020-01-01T17:29:13.169306Z" + }, + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config/201602160002.sql", + "migrationType": 1, + "contents": "create table {schema}.config (\n id integer,\n k varchar(100),\n v varchar(100),\n primary key (id)\n);\n", + "checkSum": "58db38d8f6c197ab290212470a82fe1f5b1f3cacadbe00ac59cd68a3bfa98baf", + "schema": "config", + "appliedAt": "2020-01-01T17:29:13.169306Z" + }, + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants/201602160002.sql", + "migrationType": 2, + "contents": "create table {schema}.module (id integer, id_config integer, foreign key (id_config) references 
config.config(id));\n", + "checkSum": "56c4c1d8f82f3dedade5116be46267edee01a4889c6359ef03c39dc73ca653a8", + "schema": "abc", + "appliedAt": "2020-01-01T17:29:13.169306Z" + }, + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants/201602160002.sql", + "migrationType": 2, + "contents": "create table {schema}.module (id integer, id_config integer, foreign key (id_config) references config.config(id));\n", + "checkSum": "56c4c1d8f82f3dedade5116be46267edee01a4889c6359ef03c39dc73ca653a8", + "schema": "def", + "appliedAt": "2020-01-01T17:29:13.169306Z" + }, + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants/201602160002.sql", + "migrationType": 2, + "contents": "create table {schema}.module (id integer, id_config integer, foreign key (id_config) references config.config(id));\n", + "checkSum": "56c4c1d8f82f3dedade5116be46267edee01a4889c6359ef03c39dc73ca653a8", + "schema": "xyz", + "appliedAt": "2020-01-01T17:29:13.169306Z" + } +] +``` -# Supported databases +`MigrationDB` JSON contains all the fields from `Migration` struct and adds the following ones: -Currently migrator supports the following databases and their flavours: +* `schema` - schema for which given migration was applied, for single migrations this is equal to source dir name, for multi-tenant ones this is the name of the actual tenant schema +* `appliedAt` - date time migration was applied -* PostgreSQL 9.3+ - schema-based multi-tenant database, with transactions spanning DDL statements, driver used: https://github.com/lib/pq - * PostgreSQL - original PostgreSQL server - * Amazon RDS PostgreSQL - PostgreSQL-compatible relational database built for the cloud - * Amazon Aurora 
PostgreSQL - PostgreSQL-compatible relational database built for the cloud - * Google CloudSQL PostgreSQL - PostgreSQL-compatible relational database built for the cloud -* MySQL 5.6+ - database-based multi-tenant database, transactions do not span DDL statements, driver used: https://github.com/go-sql-driver/mysql - * MySQL - original MySQL server - * MariaDB - enhanced near linearly scalable multi-master MySQL - * Percona - an enhanced drop-in replacement for MySQL - * Amazon RDS MySQL - MySQL-compatible relational database built for the cloud - * Amazon Aurora MySQL - MySQL-compatible relational database built for the cloud - * Google CloudSQL MySQL - MySQL-compatible relational database built for the cloud -* Microsoft SQL Server 2017 - a relational database management system developed by Microsoft, driver used: https://github.com/denisenkom/go-mssqldb - * Microsoft SQL Server - original Microsoft SQL Server + +## POST /v1/migrations + +Applies new source migrations to DB and returns summary results and a list of applied migrations. 
+ +This operation requires as an input the following JSON payload: + +* `mode` - defines mode in which migrator will execute migrations, valid values are: + * `apply` - applies migrations + * `sync` - synchronises all source migrations with internal migrator's table, this action loads and marks all source migrations as applied but does not apply them + * `dry-run` - instead of calling commit, calls rollback at the end of the operation +* `response` - controls how much information is returned by migrator, valid values are: + * `full` - the response will contain both summary results and a list of applied migrations + * `summary` - the response will contain only summary results + +Sample request: + +``` +curl -v -X POST -H "Content-Type: application/json" -d '{"mode": "apply", "response": "full"}' http://localhost:8080/v1/migrations +``` + +Sample HTTP response: + +``` +{ + "results": { + "startedAt": "2020-01-01T18:29:13.14682+01:00", + "duration": 51637303, + "tenants": 3, + "singleMigrations": 4, + "tenantMigrations": 4, + "tenantMigrationsTotal": 12, + "migrationsGrandTotal": 16, + "singleScripts": 1, + "tenantScripts": 1, + "tenantScriptsTotal": 3, + "scriptsGrandTotal": 4 + }, + "appliedMigrations": [ + { + "name": "201602160001.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/config/201602160001.sql", + "migrationType": 1, + "contents": "create schema config;\n", + "checkSum": "c1380af7a054ec75778252f539e1e9f914d2c5b1f441ea1df18c2140c6c3380a" + } + ] +} +``` + +`appliedMigrations` is a list of JSON representation of `Migration` struct as already described above. + +`results` is a JSON representation of `Results` struct. 
`Results` JSON contains the following fields: + +* `startedAt` - date time the operation started +* `duration` - how long the operation took in nanoseconds +* `tenants` - number of tenants in the system +* `singleMigrations` - number of identified and applied single migrations +* `tenantMigrations` - number of identified tenant migrations +* `tenantMigrationsTotal` - number of all tenant migrations applied (equals to `tenants` * `tenantMigrations`) +* `migrationsGrandTotal` - sum of `singleMigrations` and `tenantMigrationsTotal` +* `singleScripts` - number of read and applied single scripts +* `tenantScripts` - number of read tenant scripts +* `tenantScriptsTotal` - number of all tenant scripts applied (equals to `tenants` * `tenantScripts`) +* `scriptsGrandTotal` - sum of `singleScripts` and `tenantScriptsTotal` + + +## GET /v1/tenants + +Returns list of all tenants. + +Sample request: + +``` +curl -v http://localhost:8080/v1/tenants +``` + +Sample HTTP response: + + +``` +< HTTP/1.1 200 OK +< Content-Type: application/json; charset=utf-8 +< Date: Wed, 01 Jan 2020 17:16:09 GMT +< Content-Length: 58 + +["abc","def","xyz","new_test_tenant_1577793069634018000"] +``` + +## POST /v1/tenants + +Adds a new tenant and applies all tenant migrations and scripts for newly created tenant. Returns summary results and a list of applied migrations. + +This operation requires as an input the following JSON payload: + +* `name` - the name of the new tenant +* `mode` - same as `mode` for [POST /v1/migrations](#post-v1migrations) +* `response` - same as `response` for [POST /v1/migrations](#post-v1migrations) + +Sample request: + +``` +curl -v -X POST -H "Content-Type: application/json" -d '{"name": "new_test_tenant", "mode": "apply", "response": "full"}' http://localhost:8080/v1/tenants +``` + +Sample HTTP response. 
+ +``` +< HTTP/1.1 200 OK +< Content-Type: application/json; charset=utf-8 +< Date: Wed, 01 Jan 2020 17:45:00 GMT +< Transfer-Encoding: chunked + +{ + "results": { + "startedAt": "2020-01-01T18:45:00.174152+01:00", + "duration": 12426788, + "tenants": 1, + "singleMigrations": 0, + "tenantMigrations": 4, + "tenantMigrationsTotal": 4, + "migrationsGrandTotal": 4, + "singleScripts": 0, + "tenantScripts": 1, + "tenantScriptsTotal": 1, + "scriptsGrandTotal": 1 + }, + "appliedMigrations": [ + { + "name": "201602160002.sql", + "sourceDir": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants", + "file": "/Users/lukasz/go/src/github.com/lukaszbudnik/migrator/test/migrations/tenants/201602160002.sql", + "migrationType": 2, + "contents": "create table {schema}.module (id integer, id_config integer, foreign key (id_config) references config.config(id));\n", + "checkSum": "56c4c1d8f82f3dedade5116be46267edee01a4889c6359ef03c39dc73ca653a8" + } + ] +} +``` + +The response is identical to the one of [POST /v1/migrations](#post-v1migrations). When adding new tenant only tenant migrations and scripts are applied and only for the newly created tenant. That is why `singleMigrations` and `singleScripts` are always 0 and `tenants` is always 1. + +## Request tracing + +migrator uses request tracing via `X-Request-ID` header. This header can be used with all requests for tracing and/or auditing purposes. If this header is absent migrator will generate one for you. # Quick Start Guide -You can apply your first migrations with migrator in literally a couple of minutes. There are some test migrations which are placed in `test/migrations` directory as well as some docker scripts for setting up test databases. +You can apply your first migrations with migrator in literally a couple of minutes. There are some test migrations which are located in `test` directory as well as some docker scripts for setting up test databases. 
The quick start guide shows you how to either build the migrator locally or use the official docker image. @@ -172,48 +390,112 @@ go build ./migrator -configFile test/migrator.yaml ``` -> Note: There are 2 git variables injected into the production build (branch/tag and commit sha). When migrator is built like above it prints empty branch/tag and commit sha. This is OK for local development. If you want to inject proper values take a look at `Dockerfile` for details. +> Note: There are 3 git variables injected into the production build (branch/tag together with commit sha & commit date). When migrator is built like above it prints empty branch/tag and commit sha. This is OK for local development. If you want to inject proper values take a look at `Dockerfile` for details. ## 4. Run migrator from official docker image +The official migrator docker image is available on docker hub [lukasz/migrator](https://hub.docker.com/r/lukasz/migrator). + +All migrator releases are automatically available as docker images on docker hub [lukasz/migrator/tags](https://hub.docker.com/r/lukasz/migrator/tags). + +To start a migrator container you need to: + +1. mount a volume with migrations, for example: `/data` +2. 
specify location of migrator configuration file, for convenience it is usually located under `/data` directory; it defaults to `/data/migrator.yaml` and can be overridden by setting environment variable `MIGRATOR_YAML` + When running migrator from docker we need to update `migrator.yaml` (generated in step 2) as well as provide a link to `migrator-postgres` container: ``` sed -i "s/host=[^ ]* port=[^ ]*/host=migrator-postgres port=5432/g" test/migrator.yaml sed -i "s/baseDir: .*/baseDir: \/data\/migrations/g" test/migrator.yaml -docker run -p 8080:8080 -v $PWD/test:/data -e MIGRATOR_YAML=/data/migrator.yaml -d --link migrator-postgres lukasz/migrator +docker run --name migrator-test -p 8080:8080 -v $PWD/test:/data -e MIGRATOR_YAML=/data/migrator.yaml -d --link migrator-postgres lukasz/migrator ``` -## 4. Play around with migrator +## 5. Play around with migrator Happy path: ``` -curl -v http://localhost:8080/config -curl -v http://localhost:8080/diskMigrations -curl -v http://localhost:8080/tenants -curl -v http://localhost:8080/migrations -curl -v -X POST http://localhost:8080/migrations -curl -v -X POST -H "Content-Type: application/json" -d '{"name": "new_tenant"}' http://localhost:8080/tenants +curl -v http://localhost:8080/v1/config +curl -v http://localhost:8080/v1/migrations/source +curl -v http://localhost:8080/v1/tenants +curl -v http://localhost:8080/v1/migrations/applied +curl -v -X POST -H "Content-Type: application/json" -d '{"mode": "apply", "response": "full"}' http://localhost:8080/v1/migrations +curl -v -X POST -H "Content-Type: application/json" -d '{"name": "new_tenant", "mode": "apply", "response": "full"}' http://localhost:8080/v1/tenants +curl -v http://localhost:8080/v1/migrations/applied ``` And some errors. For example let's break a checksum of the first migration and try to apply migrations or add new tenant. 
``` echo " " >> test/migrations/config/201602160001.sql -curl -v -X POST -H "X-Request-Id: xyzpoi098654" http://localhost:8080/migrations -curl -v -X POST -H "Content-Type: application/json" -H "X-Request-Id: abcdef123456" -d '{"name": "new_tenant2"}' http://localhost:8080/tenants +curl -v -X POST -H "Content-Type: application/json" -d '{"mode": "apply", "response": "full"}' http://localhost:8080/v1/migrations +curl -v -X POST -H "Content-Type: application/json" -d '{"name": "new_tenant", "mode": "apply", "response": "full"}' http://localhost:8080/v1/tenants ``` -In above error requests I used optional `X-Request-Id` header. This header can be used with all requests for tracing and/or auditing purposes. +# Configuration + +migrator requires a simple `migrator.yaml` file: -# Customisation +```yaml +# required, base directory where all migrations are stored, see singleSchemas and tenantSchemas below +baseDir: test/migrations +# required, SQL go driver implementation used, see section "Supported databases" +driver: postgres +# required, dataSource format is specific to SQL go driver implementation used, see section "Supported databases" +dataSource: "user=postgres dbname=migrator_test host=192.168.99.100 port=55432 sslmode=disable" +# optional, override only if you have a specific way of determining tenants, default is: +tenantSelectSQL: "select name from migrator.migrator_tenants" +# optional, override only if you have a specific way of creating tenants, default is: +tenantInsertSQL: "insert into migrator.migrator_tenants (name) values ($1)" +# optional, override only if you have a specific schema placeholder, default is: +schemaPlaceHolder: {schema} +# required, directories of single schema SQL migrations, these are subdirectories of baseDir +singleMigrations: + - public + - ref + - config +# optional, directories of tenant schemas SQL migrations, these are subdirectories of baseDir +tenantMigrations: + - tenants +# optional, directories of single SQL scripts which 
are applied always, these are subdirectories of baseDir +singleScripts: + - config-scripts +# optional, directories of tenant SQL script which are applied always for all tenants, these are subdirectories of baseDir +tenantScripts: + - tenants-scripts +# optional, default is: +port: 8080 +# the webhook configuration section is optional +# URL and template are required if at least one of them is empty noop notifier is used +# the default content type header sent is application/json (can be overridden via webHookHeaders below) +webHookURL: https://your.server.com/services/TTT/BBB/XXX +# should you need more control over HTTP headers use below +webHookHeaders: + - "Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l" + - "Content-Type: application/json" + - "X-CustomHeader: value1,value2" +``` + +migrator supports env variables substitution in config file. All patterns matching `${NAME}` will look for env variable `NAME`. Below are some common use cases: + +```yaml +dataSource: "user=${DB_USER} password=${DB_PASSWORD} dbname=${DB_NAME} host=${DB_HOST} port=${DB_PORT}" +webHookHeaders: + - "X-Security-Token: ${SECURITY_TOKEN}" +``` + +# Customisation and legacy frameworks support + +migrator can be used with an already existing legacy DB migration framework. + +## Custom tenants support If you have an existing way of storing information about your tenants you can configure migrator to use it. 
-In the config file you need to provide 2 parameters: +In the config file you need to provide 2 configuration properties: * `tenantSelectSQL` - a select statement which returns names of the tenants -* `tenantInsertSQL` - an insert statement which creates a new tenant entry, this is called as a prepared statement and is called with the name of the tenant as a parameter; should your table require additional columns you need to provide default values for them +* `tenantInsertSQL` - an insert statement which creates a new tenant entry, the insert statement should be a valid prepared statement for the SQL driver/database you use, it must accept the name of the new tenant as a parameter; finally should your table require additional columns you need to provide default values for them too Here is an example: @@ -222,6 +504,67 @@ tenantSelectSQL: select name from global.customers tenantInsertSQL: insert into global.customers (name, active, date_added) values (?, true, NOW()) ``` +## Custom schema placeholder + +SQL migrations and scripts can use `{schema}` placeholder which will be automatically replaced by migrator with a current schema. For example: + +```sql +create schema if not exists {schema}; +create table if not exists {schema}.modules ( k int, v text ); +insert into {schema}.modules values ( 123, '123' ); +``` + +If you have an existing DB migrations legacy framework which uses different schema placeholder you can override the default one. +In the config file you need to provide `schemaPlaceHolder` configuration property: + +For example: + +```yaml +schemaPlaceHolder: :tenant +``` + +## Synchonising legacy migrations to migrator + +Before switching from a legacy framework to migrator you need to synchronise source migrations to migrator. 
+ +This can be done using the POST /v1/migrations endpoint and setting the `mode` param to `sync`: + +``` +curl -v -X POST -H "Content-Type: application/json" -d '{"mode": "sync", "response": "full"}' http://localhost:8080/v1/migrations +``` + +migrator will load and synchronise all source migrations with internal migrator's table, this action loads and marks all source migrations as applied but does not apply them. + +Once the initial sync is done you can move to migrator for all the consecutive DB migrations. + +## Final comments + +When using migrator please remember that: + +* migrator creates `migrator` schema together with `migrator_migrations` table automatically +* if you're not using [Custom tenants support](#custom-tenants-support) migrator creates `migrator_tenants` table automatically; just like `migrator_migrations` this table is created inside the `migrator` schema +* when adding a new tenant migrator creates a new DB schema and applies all tenant migrations and scripts - no need to apply them manually +* single schemas are not created automatically, you must add initial migration with `create schema {schema}` SQL statement (see examples above) + +# Supported databases + +Currently migrator supports the following databases and their flavours: + +* PostgreSQL 9.3+ - schema-based multi-tenant database, with transactions spanning DDL statements, driver used: https://github.com/lib/pq + * PostgreSQL - original PostgreSQL server + * Amazon RDS PostgreSQL - PostgreSQL-compatible relational database built for the cloud + * Amazon Aurora PostgreSQL - PostgreSQL-compatible relational database built for the cloud + * Google CloudSQL PostgreSQL - PostgreSQL-compatible relational database built for the cloud +* MySQL 5.6+ - database-based multi-tenant database, transactions do not span DDL statements, driver used: https://github.com/go-sql-driver/mysql + * MySQL - original MySQL server + * MariaDB - enhanced near linearly scalable multi-master MySQL + * Percona - 
an enhanced drop-in replacement for MySQL + * Amazon RDS MySQL - MySQL-compatible relational database built for the cloud + * Amazon Aurora MySQL - MySQL-compatible relational database built for the cloud + * Google CloudSQL MySQL - MySQL-compatible relational database built for the cloud +* Microsoft SQL Server 2017 - a relational database management system developed by Microsoft, driver used: https://github.com/denisenkom/go-mssqldb + * Microsoft SQL Server - original Microsoft SQL Server + # Performance As a benchmarks I used 2 migrations frameworks: @@ -246,6 +589,10 @@ flyway results are... very surprising. I was so shocked that I had to re-run fly The other thing to consider is the fact that migrator is written in go which is known to be much faster than Ruby and Java. +# Change log + +Please navigate to [migrator/releases](https://github.com/lukaszbudnik/migrator/releases) for a complete list of versions, features, and change log. + # Contributing, code style, running unit & integration tests Contributions are most welcomed. @@ -263,7 +610,7 @@ The `ultimate-coverage.sh` script loops through 5 different containers (3 MySQL # License -Copyright 2016-2019 Łukasz Budnik +Copyright 2016-2020 Łukasz Budnik Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at diff --git a/TestDockerfile b/TestDockerfile index 4246a44..35c2152 100644 --- a/TestDockerfile +++ b/TestDockerfile @@ -1,9 +1,9 @@ -FROM golang:1.11.2-alpine3.8 as builder +FROM golang:1.13.5-alpine3.10 as builder MAINTAINER Łukasz Budnik lukasz.budnik@gmail.com -# use "--build-arg SOURCE_BRANCH=migrator-v3" to override at build time -# docker build -f TestDockerfile --build-arg SOURCE_BRANCH=migrator-v3 -t migratortest:v1 . +# use "--build-arg SOURCE_BRANCH=dev" to override at build time +# docker build -f TestDockerfile --build-arg SOURCE_BRANCH=dev -t migrator-local:dev . 
ARG SOURCE_BRANCH=master # git is required @@ -25,7 +25,7 @@ RUN cd /go/src/github.com/lukaszbudnik/migrator && \ GIT_COMMIT_SHA=$(git rev-list -1 HEAD) && \ go build -ldflags "-X main.GitCommitDate=$GIT_COMMIT_DATE -X main.GitCommitSha=$GIT_COMMIT_SHA -X main.GitBranch=$GIT_BRANCH" -FROM alpine:3.8 +FROM alpine:3.10 COPY --from=builder /go/src/github.com/lukaszbudnik/migrator/migrator /bin VOLUME ["/data"] diff --git a/common/common.go b/common/common.go index 534e466..f8a9724 100644 --- a/common/common.go +++ b/common/common.go @@ -4,14 +4,12 @@ import ( "context" "fmt" "log" + "runtime" ) -// RequestIDKey is used together with context for setting/getting X-Request-Id +// RequestIDKey is used together with context for setting/getting X-Request-ID type RequestIDKey struct{} -// ActionKey is used together with context for setting/getting current action -type ActionKey struct{} - // LogError logs error message func LogError(ctx context.Context, format string, a ...interface{}) string { return logLevel(ctx, "ERROR", format, a...) @@ -22,16 +20,29 @@ func LogInfo(ctx context.Context, format string, a ...interface{}) string { return logLevel(ctx, "INFO", format, a...) } -// LogPanic logs error message and panics +// LogPanic logs error message func LogPanic(ctx context.Context, format string, a ...interface{}) string { - message := logLevel(ctx, "PANIC", format, a...) - panic(message) + return logLevel(ctx, "PANIC", format, a...) +} + +// Log logs message with a given level with no request context +func Log(level string, format string, a ...interface{}) string { + _, file, line, _ := runtime.Caller(2) + + message := fmt.Sprintf(format, a...) 
+ + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC) + log.Printf("[%v:%v] %v %v", file, line, level, message) + return message } func logLevel(ctx context.Context, level string, format string, a ...interface{}) string { + _, file, line, _ := runtime.Caller(2) + requestID := ctx.Value(RequestIDKey{}) - action := ctx.Value(ActionKey{}) message := fmt.Sprintf(format, a...) - log.Printf("%v %v [%v] - %v", level, action, requestID, message) + + log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC) + log.Printf("[%v:%v] %v requestId=%v %v", file, line, level, requestID, message) return message } diff --git a/common/common_test.go b/common/common_test.go index 91e29be..b40560d 100644 --- a/common/common_test.go +++ b/common/common_test.go @@ -2,35 +2,33 @@ package common import ( "context" - "runtime" - "strings" "testing" "github.com/stretchr/testify/assert" ) func newTestContext() context.Context { - pc, _, _, _ := runtime.Caller(1) - details := runtime.FuncForPC(pc) - ctx := context.TODO() ctx = context.WithValue(ctx, RequestIDKey{}, "123") - ctx = context.WithValue(ctx, ActionKey{}, strings.Replace(details.Name(), "github.com/lukaszbudnik/migrator/common.", "", -1)) return ctx } func TestLogInfo(t *testing.T) { - message := LogInfo(newTestContext(), "format no params") - assert.Equal(t, "format no params", message) + message := LogInfo(newTestContext(), "success") + assert.Equal(t, "success", message) } func TestLogError(t *testing.T) { - message := LogError(newTestContext(), "format no params: %v", 123) - assert.Equal(t, "format no params: 123", message) + message := LogError(newTestContext(), "param=%v", 123) + assert.Equal(t, "param=123", message) } func TestLogPanic(t *testing.T) { - assert.Panics(t, func() { - LogPanic(newTestContext(), "format no params: %v", 123) - }) + message := LogPanic(newTestContext(), "param=%v", 123456) + assert.Equal(t, "param=123456", message) +} + +func TestLog(t *testing.T) { + message := Log("INFO", 
"param=%v", 456) + assert.Equal(t, "param=456", message) } diff --git a/config/config.go b/config/config.go index fc67098..1cbe7e7 100644 --- a/config/config.go +++ b/config/config.go @@ -6,15 +6,15 @@ import ( "reflect" "strings" - "gopkg.in/validator.v2" + "github.com/go-playground/validator" "gopkg.in/yaml.v2" ) // Config represents Migrator's yaml configuration file type Config struct { - BaseDir string `yaml:"baseDir" validate:"nonzero"` - Driver string `yaml:"driver" validate:"nonzero"` - DataSource string `yaml:"dataSource" validate:"nonzero"` + BaseDir string `yaml:"baseDir" validate:"required"` + Driver string `yaml:"driver" validate:"required"` + DataSource string `yaml:"dataSource" validate:"required"` TenantSelectSQL string `yaml:"tenantSelectSQL,omitempty"` TenantInsertSQL string `yaml:"tenantInsertSQL,omitempty"` SchemaPlaceHolder string `yaml:"schemaPlaceHolder,omitempty"` @@ -24,7 +24,6 @@ type Config struct { TenantScripts []string `yaml:"tenantScripts,omitempty"` Port string `yaml:"port,omitempty"` WebHookURL string `yaml:"webHookURL,omitempty"` - WebHookTemplate string `yaml:"webHookTemplate,omitempty"` WebHookHeaders []string `yaml:"webHookHeaders,omitempty"` } @@ -52,7 +51,8 @@ func FromBytes(contents []byte) (*Config, error) { return nil, err } - if err := validator.Validate(config); err != nil { + validate := validator.New() + if err := validate.Struct(config); err != nil { return nil, err } diff --git a/config/config_test.go b/config/config_test.go index 4da7b67..a6a45ff 100644 --- a/config/config_test.go +++ b/config/config_test.go @@ -2,21 +2,14 @@ package config import ( "fmt" - "io/ioutil" - "log" "os" "testing" + "github.com/go-playground/validator" "github.com/stretchr/testify/assert" - "gopkg.in/validator.v2" "gopkg.in/yaml.v2" ) -func noopLogger() *log.Logger { - log := log.New(ioutil.Discard, "", 0) - return log -} - func TestFromFile(t *testing.T) { config, err := FromFile("../test/migrator-test.yaml") assert.Nil(t, err) @@ -29,7 
+22,6 @@ func TestFromFile(t *testing.T) { assert.Equal(t, "8811", config.Port) assert.Equal(t, "{schema}", config.SchemaPlaceHolder) assert.Equal(t, "https://slack.com/api/api.test", config.WebHookURL) - assert.Equal(t, `{"text": "{text}","icon_emoji": ":white_check_mark:"}`, config.WebHookTemplate) assert.Equal(t, []string{"Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l", "Content-Type: application/json", "X-CustomHeader: value1,value2"}, config.WebHookHeaders) } @@ -46,12 +38,11 @@ func TestWithEnvFromFile(t *testing.T) { assert.Equal(t, []string{"tenants"}, config.TenantMigrations) assert.Equal(t, []string{"public", "ref", "config"}, config.SingleMigrations) assert.Equal(t, os.Getenv("SHLVL"), config.WebHookURL) - assert.Equal(t, os.Getenv("TERM"), config.WebHookTemplate) assert.Equal(t, fmt.Sprintf("X-Security-Token: %v", os.Getenv("USER")), config.WebHookHeaders[0]) } func TestConfigString(t *testing.T) { - config := &Config{"/opt/app/migrations", "postgres", "user=p dbname=db host=localhost", "select abc", "insert into table", ":tenant", []string{"ref"}, []string{"tenants"}, []string{"procedures"}, []string{}, "8181", "https://hooks.slack.com/services/TTT/BBB/XXX", "{json: text}", []string{}} + config := &Config{"/opt/app/migrations", "postgres", "user=p dbname=db host=localhost", "select abc", "insert into table", ":tenant", []string{"ref"}, []string{"tenants"}, []string{"procedures"}, []string{}, "8181", "https://hooks.slack.com/services/TTT/BBB/XXX", []string{}} // check if go naming convention applies expected := `baseDir: /opt/app/migrations driver: postgres @@ -66,8 +57,7 @@ tenantMigrations: singleScripts: - procedures port: "8181" -webHookURL: https://hooks.slack.com/services/TTT/BBB/XXX -webHookTemplate: '{json: text}'` +webHookURL: https://hooks.slack.com/services/TTT/BBB/XXX` actual := fmt.Sprintf("%v", config) assert.Equal(t, expected, actual) } @@ -75,7 +65,7 @@ webHookTemplate: '{json: text}'` func TestConfigReadFromEmptyFileError(t *testing.T) 
{ config, err := FromFile("../test/empty.yaml") assert.Nil(t, config) - assert.IsType(t, (validator.ErrorMap)(nil), err, "Should error because of validation errors") + assert.IsType(t, (validator.ValidationErrors)(nil), err, "Should error because of validation errors") } func TestConfigReadFromNonExistingFileError(t *testing.T) { @@ -85,7 +75,7 @@ func TestConfigReadFromNonExistingFileError(t *testing.T) { } func TestConfigFromWrongSyntaxFile(t *testing.T) { - config, err := FromFile("../README.md") + config, err := FromFile("../Dockerfile") assert.Nil(t, config) - assert.IsType(t, (*yaml.TypeError)(nil), err, "Should panic because of wrong yaml syntax") + assert.IsType(t, (*yaml.TypeError)(nil), err, "Should error because of wrong yaml syntax") } diff --git a/coordinator/coordinator.go b/coordinator/coordinator.go new file mode 100644 index 0000000..a4bca1f --- /dev/null +++ b/coordinator/coordinator.go @@ -0,0 +1,233 @@ +package coordinator + +import ( + "context" + "encoding/json" + "sync" + + "github.com/lukaszbudnik/migrator/common" + "github.com/lukaszbudnik/migrator/config" + "github.com/lukaszbudnik/migrator/db" + "github.com/lukaszbudnik/migrator/loader" + "github.com/lukaszbudnik/migrator/notifications" + "github.com/lukaszbudnik/migrator/types" +) + +// Coordinator interface abstracts all operations performed by migrator +type Coordinator interface { + GetTenants() []string + GetSourceMigrations() []types.Migration + GetAppliedMigrations() []types.MigrationDB + VerifySourceMigrationsCheckSums() (bool, []types.Migration) + ApplyMigrations(types.MigrationsModeType) (*types.MigrationResults, []types.Migration) + AddTenantAndApplyMigrations(types.MigrationsModeType, string) (*types.MigrationResults, []types.Migration) + Dispose() +} + +// coordinator struct is a struct for implementing DB specific dialects +type coordinator struct { + ctx context.Context + connector db.Connector + loader loader.Loader + notifier notifications.Notifier + config *config.Config 
+ tenants []string + sourceMigrations []types.Migration + appliedMigrations []types.MigrationDB + loaderLock sync.Mutex + connectorLock sync.Mutex +} + +// Factory creates new Coordinator instance +type Factory func(context.Context, *config.Config) Coordinator + +// New creates instance of Coordinator +func New(ctx context.Context, config *config.Config, newConnector db.Factory, newLoader loader.Factory, newNotifier notifications.Factory) Coordinator { + connector := newConnector(ctx, config) + loader := newLoader(ctx, config) + notifier := newNotifier(ctx, config) + coordinator := &coordinator{ + connector: connector, + loader: loader, + notifier: notifier, + config: config, + ctx: ctx, + } + return coordinator +} + +func (c *coordinator) GetTenants() []string { + c.connectorLock.Lock() + defer c.connectorLock.Unlock() + if c.tenants == nil { + tenants := c.connector.GetTenants() + c.tenants = tenants + } + return c.tenants +} + +func (c *coordinator) GetSourceMigrations() []types.Migration { + c.loaderLock.Lock() + defer c.loaderLock.Unlock() + if c.sourceMigrations == nil { + sourceMigrations := c.loader.GetSourceMigrations() + c.sourceMigrations = sourceMigrations + } + return c.sourceMigrations +} + +func (c *coordinator) GetAppliedMigrations() []types.MigrationDB { + c.connectorLock.Lock() + defer c.connectorLock.Unlock() + if c.appliedMigrations == nil { + appliedMigrations := c.connector.GetAppliedMigrations() + c.appliedMigrations = appliedMigrations + } + return c.appliedMigrations +} + +// VerifySourceMigrationsCheckSums verifies if CheckSum of disk and flattened DB migrations match +// returns bool indicating if offending (i.e., modified) disk migrations were found +// if bool is false the function returns a slice of offending migrations +// if bool is true the slice of effending migrations is empty +func (c *coordinator) VerifySourceMigrationsCheckSums() (bool, []types.Migration) { + sourceMigrations := c.GetSourceMigrations() + appliedMigrations := 
c.GetAppliedMigrations() + + flattenedAppliedMigration := c.flattenAppliedMigrations(appliedMigrations) + + intersect := c.intersect(sourceMigrations, flattenedAppliedMigration) + + var offendingMigrations []types.Migration + var result = true + for _, t := range intersect { + if t.source.CheckSum != t.applied.CheckSum { + offendingMigrations = append(offendingMigrations, t.source) + result = false + } + } + return result, offendingMigrations +} + +func (c *coordinator) ApplyMigrations(mode types.MigrationsModeType) (*types.MigrationResults, []types.Migration) { + sourceMigrations := c.GetSourceMigrations() + appliedMigrations := c.GetAppliedMigrations() + + migrationsToApply := c.computeMigrationsToApply(sourceMigrations, appliedMigrations) + common.LogInfo(c.ctx, "Found migrations to apply: %d", len(migrationsToApply)) + + results := c.connector.ApplyMigrations(mode, migrationsToApply) + + c.sendNotification(results) + + return results, migrationsToApply +} + +func (c *coordinator) AddTenantAndApplyMigrations(mode types.MigrationsModeType, tenant string) (*types.MigrationResults, []types.Migration) { + sourceMigrations := c.GetSourceMigrations() + + // filter only tenant schemas + migrationsToApply := c.filterTenantMigrations(sourceMigrations) + common.LogInfo(c.ctx, "Migrations to apply for new tenant: %d", len(migrationsToApply)) + + results := c.connector.AddTenantAndApplyMigrations(mode, tenant, migrationsToApply) + + c.sendNotification(results) + + return results, migrationsToApply +} + +func (c *coordinator) Dispose() { + c.connector.Dispose() +} + +func (c *coordinator) flattenAppliedMigrations(appliedMigrations []types.MigrationDB) []types.Migration { + var flattened []types.Migration + var previousMigration types.Migration + for i, m := range appliedMigrations { + if i == 0 || m.Migration != previousMigration { + flattened = append(flattened, m.Migration) + previousMigration = m.Migration + } + } + return flattened +} + +// intersect returns the elements 
from source and applied +func (c *coordinator) intersect(sourceMigrations []types.Migration, flattenedAppliedMigrations []types.Migration) []struct { + source types.Migration + applied types.Migration +} { + // key is Migration.File + existsInDB := map[string]types.Migration{} + for _, m := range flattenedAppliedMigrations { + existsInDB[m.File] = m + } + intersect := []struct { + source types.Migration + applied types.Migration + }{} + for _, m := range sourceMigrations { + if db, ok := existsInDB[m.File]; ok { + intersect = append(intersect, struct { + source types.Migration + applied types.Migration + }{m, db}) + } + } + return intersect +} + +// difference returns the elements on disk which are not yet in DB +// the exceptions are MigrationTypeSingleScript and MigrationTypeTenantScript which are always run +func (c *coordinator) difference(sourceMigrations []types.Migration, flattenedAppliedMigrations []types.Migration) []types.Migration { + // key is Migration.File + existsInDB := map[string]bool{} + for _, m := range flattenedAppliedMigrations { + if m.MigrationType != types.MigrationTypeSingleScript && m.MigrationType != types.MigrationTypeTenantScript { + existsInDB[m.File] = true + } + } + diff := []types.Migration{} + for _, m := range sourceMigrations { + if _, ok := existsInDB[m.File]; !ok { + diff = append(diff, m) + } + } + return diff +} + +// computeMigrationsToApply computes which source migrations should be applied to DB based on migrations already present in DB +func (c *coordinator) computeMigrationsToApply(sourceMigrations []types.Migration, appliedMigrations []types.MigrationDB) []types.Migration { + flattenedAppliedMigrations := c.flattenAppliedMigrations(appliedMigrations) + + len := len(flattenedAppliedMigrations) + common.LogInfo(c.ctx, "Number of flattened DB migrations: %d", len) + + out := c.difference(sourceMigrations, flattenedAppliedMigrations) + return out +} + +// filterTenantMigrations returns only migrations which are of type 
MigrationTypeTenantSchema +func (c *coordinator) filterTenantMigrations(sourceMigrations []types.Migration) []types.Migration { + filteredTenantMigrations := []types.Migration{} + for _, m := range sourceMigrations { + if m.MigrationType == types.MigrationTypeTenantMigration || m.MigrationType == types.MigrationTypeTenantScript { + filteredTenantMigrations = append(filteredTenantMigrations, m) + } + } + + return filteredTenantMigrations +} + +// errors are silently discarded, adding tenant or applying migrations +// must not fail because of notification error +func (c *coordinator) sendNotification(results *types.MigrationResults) { + bytes, _ := json.Marshal(results) + text := string(bytes) + if resp, err := c.notifier.Notify(text); err != nil { + common.LogError(c.ctx, "Notifier error: %v", err.Error()) + } else { + common.LogInfo(c.ctx, "Notifier response: %v", resp) + } +} diff --git a/coordinator/coordinator_mocks.go b/coordinator/coordinator_mocks.go new file mode 100644 index 0000000..fc6076e --- /dev/null +++ b/coordinator/coordinator_mocks.go @@ -0,0 +1,76 @@ +package coordinator + +import ( + "context" + "time" + + "github.com/lukaszbudnik/migrator/config" + "github.com/lukaszbudnik/migrator/db" + "github.com/lukaszbudnik/migrator/loader" + "github.com/lukaszbudnik/migrator/notifications" + "github.com/lukaszbudnik/migrator/types" +) + +type mockedDiskLoader struct { +} + +func (m *mockedDiskLoader) GetSourceMigrations() []types.Migration { + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} + m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeTenantMigration, Contents: "select def"} + return []types.Migration{m1, m2} +} + +func newMockedDiskLoader(_ context.Context, _ *config.Config) loader.Loader { + return &mockedDiskLoader{} +} + +type 
mockedNotifier struct{} + +func (m *mockedNotifier) Notify(message string) (string, error) { + return "mock", nil +} + +func newMockedNotifier(_ context.Context, _ *config.Config) notifications.Notifier { + return &mockedNotifier{} +} + +type mockedBrokenCheckSumDiskLoader struct { +} + +func (m *mockedBrokenCheckSumDiskLoader) GetSourceMigrations() []types.Migration { + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc", CheckSum: "xxx"} + return []types.Migration{m1} +} + +func newBrokenCheckSumMockedDiskLoader(_ context.Context, _ *config.Config) loader.Loader { + return new(mockedBrokenCheckSumDiskLoader) +} + +type mockedConnector struct { +} + +func (m *mockedConnector) Dispose() { +} + +func (m *mockedConnector) AddTenantAndApplyMigrations(types.MigrationsModeType, string, []types.Migration) *types.MigrationResults { + return &types.MigrationResults{} +} + +func (m *mockedConnector) GetTenants() []string { + return []string{"a", "b", "c"} +} + +func (m *mockedConnector) GetAppliedMigrations() []types.MigrationDB { + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} + d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) + ms := []types.MigrationDB{{Migration: m1, Schema: "source", AppliedAt: d1}} + return ms +} + +func (m *mockedConnector) ApplyMigrations(types.MigrationsModeType, []types.Migration) *types.MigrationResults { + return &types.MigrationResults{} +} + +func newMockedConnector(context.Context, *config.Config) db.Connector { + return &mockedConnector{} +} diff --git a/migrations/migrations_test.go b/coordinator/coordinator_test.go similarity index 60% rename from migrations/migrations_test.go rename to coordinator/coordinator_test.go index 84bc40e..9164a64 100644 --- a/migrations/migrations_test.go 
+++ b/coordinator/coordinator_test.go @@ -1,4 +1,4 @@ -package migrations +package coordinator import ( "context" @@ -11,65 +11,72 @@ import ( func TestMigrationsFlattenMigrationDBs1(t *testing.T) { m1 := types.Migration{Name: "001.sql", SourceDir: "public", File: "public/001.sql", MigrationType: types.MigrationTypeSingleMigration} - db1 := types.MigrationDB{Migration: m1, Schema: "public", Created: time.Now()} + db1 := types.MigrationDB{Migration: m1, Schema: "public", AppliedAt: time.Now()} m2 := types.Migration{Name: "002.sql", SourceDir: "tenants", File: "tenants/002.sql", MigrationType: types.MigrationTypeTenantMigration} - db2 := types.MigrationDB{Migration: m2, Schema: "abc", Created: time.Now()} + db2 := types.MigrationDB{Migration: m2, Schema: "abc", AppliedAt: time.Now()} - db3 := types.MigrationDB{Migration: m2, Schema: "def", Created: time.Now()} + db3 := types.MigrationDB{Migration: m2, Schema: "def", AppliedAt: time.Now()} m4 := types.Migration{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleMigration} - db4 := types.MigrationDB{Migration: m4, Schema: "ref", Created: time.Now()} + db4 := types.MigrationDB{Migration: m4, Schema: "ref", AppliedAt: time.Now()} dbs := []types.MigrationDB{db1, db2, db3, db4} - migrations := flattenMigrationDBs(dbs) + coordinator := &coordinator{ + connector: newMockedConnector(context.TODO(), nil), + loader: newMockedDiskLoader(context.TODO(), nil), + notifier: newMockedNotifier(context.TODO(), nil), + } + migrations := coordinator.flattenAppliedMigrations(dbs) assert.Equal(t, []types.Migration{m1, m2, m4}, migrations) } func TestMigrationsFlattenMigrationDBs2(t *testing.T) { m2 := types.Migration{Name: "002.sql", SourceDir: "tenants", File: "tenants/002.sql", MigrationType: types.MigrationTypeTenantMigration} - db2 := types.MigrationDB{Migration: m2, Schema: "abc", Created: time.Now()} + db2 := types.MigrationDB{Migration: m2, Schema: "abc", AppliedAt: time.Now()} - db3 := 
types.MigrationDB{Migration: m2, Schema: "def", Created: time.Now()} + db3 := types.MigrationDB{Migration: m2, Schema: "def", AppliedAt: time.Now()} m4 := types.Migration{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleMigration} - db4 := types.MigrationDB{Migration: m4, Schema: "ref", Created: time.Now()} + db4 := types.MigrationDB{Migration: m4, Schema: "ref", AppliedAt: time.Now()} dbs := []types.MigrationDB{db2, db3, db4} - migrations := flattenMigrationDBs(dbs) + coordinator := &coordinator{} + migrations := coordinator.flattenAppliedMigrations(dbs) assert.Equal(t, []types.Migration{m2, m4}, migrations) } func TestMigrationsFlattenMigrationDBs3(t *testing.T) { m1 := types.Migration{Name: "001.sql", SourceDir: "public", File: "public/001.sql", MigrationType: types.MigrationTypeSingleMigration} - db1 := types.MigrationDB{Migration: m1, Schema: "public", Created: time.Now()} + db1 := types.MigrationDB{Migration: m1, Schema: "public", AppliedAt: time.Now()} m2 := types.Migration{Name: "002.sql", SourceDir: "tenants", File: "tenants/002.sql", MigrationType: types.MigrationTypeTenantMigration} - db2 := types.MigrationDB{Migration: m2, Schema: "abc", Created: time.Now()} + db2 := types.MigrationDB{Migration: m2, Schema: "abc", AppliedAt: time.Now()} - db3 := types.MigrationDB{Migration: m2, Schema: "def", Created: time.Now()} + db3 := types.MigrationDB{Migration: m2, Schema: "def", AppliedAt: time.Now()} m4 := types.Migration{Name: "003.sql", SourceDir: "ref", File: "ref/003.sql", MigrationType: types.MigrationTypeSingleMigration} - db4 := types.MigrationDB{Migration: m4, Schema: "ref", Created: time.Now()} + db4 := types.MigrationDB{Migration: m4, Schema: "ref", AppliedAt: time.Now()} m5 := types.Migration{Name: "global-stored-procedure1.sql", SourceDir: "public", File: "public-scripts/global-stored-procedure1.sql", MigrationType: types.MigrationTypeSingleScript} - db5 := types.MigrationDB{Migration: m5, Schema: 
"public", Created: time.Now()} + db5 := types.MigrationDB{Migration: m5, Schema: "public", AppliedAt: time.Now()} m6 := types.Migration{Name: "global-stored-procedure2.sql", SourceDir: "public", File: "public-scripts/global-stored-procedure2sql", MigrationType: types.MigrationTypeSingleScript} - db6 := types.MigrationDB{Migration: m6, Schema: "public", Created: time.Now()} + db6 := types.MigrationDB{Migration: m6, Schema: "public", AppliedAt: time.Now()} m7 := types.Migration{Name: "002.sql", SourceDir: "tenants-scripts", File: "tenants/002.sql", MigrationType: types.MigrationTypeTenantMigration} - db7 := types.MigrationDB{Migration: m7, Schema: "abc", Created: time.Now()} + db7 := types.MigrationDB{Migration: m7, Schema: "abc", AppliedAt: time.Now()} - db8 := types.MigrationDB{Migration: m7, Schema: "def", Created: time.Now()} + db8 := types.MigrationDB{Migration: m7, Schema: "def", AppliedAt: time.Now()} dbs := []types.MigrationDB{db1, db2, db3, db4, db5, db6, db7, db8} - migrations := flattenMigrationDBs(dbs) + coordinator := &coordinator{} + migrations := coordinator.flattenAppliedMigrations(dbs) assert.Equal(t, []types.Migration{m1, m2, m4, m5, m6, m7}, migrations) } @@ -83,11 +90,16 @@ func TestComputeMigrationsToApply(t *testing.T) { mdef6 := types.Migration{Name: "f", SourceDir: "f", File: "f", MigrationType: types.MigrationTypeSingleScript} mdef7 := types.Migration{Name: "g", SourceDir: "g", File: "g", MigrationType: types.MigrationTypeTenantScript} - // TODO add 2 public scripts and 1 tenant script - diskMigrations := []types.Migration{mdef1, mdef2, mdef3, mdef4, mdef5, mdef6, mdef7} - dbMigrations := []types.MigrationDB{{Migration: mdef1, Schema: "a", Created: time.Now()}, {Migration: mdef2, Schema: "abc", Created: time.Now()}, {Migration: mdef2, Schema: "def", Created: time.Now()}, {Migration: mdef5, Schema: "e", Created: time.Now()}, {Migration: mdef6, Schema: "f", Created: time.Now()}, {Migration: mdef7, Schema: "abc", Created: time.Now()}, 
{Migration: mdef7, Schema: "def", Created: time.Now()}} - migrations := ComputeMigrationsToApply(context.TODO(), diskMigrations, dbMigrations) + dbMigrations := []types.MigrationDB{{Migration: mdef1, Schema: "a", AppliedAt: time.Now()}, {Migration: mdef2, Schema: "abc", AppliedAt: time.Now()}, {Migration: mdef2, Schema: "def", AppliedAt: time.Now()}, {Migration: mdef5, Schema: "e", AppliedAt: time.Now()}, {Migration: mdef6, Schema: "f", AppliedAt: time.Now()}, {Migration: mdef7, Schema: "abc", AppliedAt: time.Now()}, {Migration: mdef7, Schema: "def", AppliedAt: time.Now()}} + + coordinator := &coordinator{ + ctx: context.TODO(), + connector: newMockedConnector(context.TODO(), nil), + loader: newMockedDiskLoader(context.TODO(), nil), + notifier: newMockedNotifier(context.TODO(), nil), + } + migrations := coordinator.computeMigrationsToApply(diskMigrations, dbMigrations) // that should be 5 now... assert.Len(t, migrations, 5) @@ -101,7 +113,16 @@ func TestComputeMigrationsToApply(t *testing.T) { assert.Equal(t, "g", migrations[4].File) } -func TestFilterTenantMigrations(t *testing.T) { +func TestComputeMigrationsToApplyDifferentTimestamps(t *testing.T) { + // use case: + // development done in parallel, 2 devs fork from master + // dev1 adds migrations on Monday + // dev2 adds migrations on Tuesday + // dev2 merges and deploys his code on Tuesday + // dev1 merges and deploys his code on Wednesday + // migrator should detect dev1 migrations + // previous implementation relied only on counts and such migration was not applied + mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantMigration} mdef2 := types.Migration{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleMigration} mdef3 := types.Migration{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleMigration} @@ -114,28 +135,24 @@ func 
TestFilterTenantMigrations(t *testing.T) { dev2p := types.Migration{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleMigration} diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, dev2, dev2p} - migrations := FilterTenantMigrations(context.TODO(), diskMigrations) + dbMigrations := []types.MigrationDB{{Migration: mdef1, Schema: "abc", AppliedAt: time.Now()}, {Migration: mdef1, Schema: "def", AppliedAt: time.Now()}, {Migration: mdef2, Schema: "public", AppliedAt: time.Now()}, {Migration: mdef3, Schema: "public", AppliedAt: time.Now()}, {Migration: dev2, Schema: "abc", AppliedAt: time.Now()}, {Migration: dev2, Schema: "def", AppliedAt: time.Now()}, {Migration: dev2p, Schema: "public", AppliedAt: time.Now()}} + + coordinator := &coordinator{ + ctx: context.TODO(), + connector: newMockedConnector(context.TODO(), nil), + loader: newMockedDiskLoader(context.TODO(), nil), + notifier: newMockedNotifier(context.TODO(), nil), + } + migrations := coordinator.computeMigrationsToApply(diskMigrations, dbMigrations) assert.Len(t, migrations, 3) - assert.Equal(t, mdef1.File, migrations[0].File) - assert.Equal(t, types.MigrationTypeTenantMigration, migrations[0].MigrationType) - assert.Equal(t, dev1.File, migrations[1].File) - assert.Equal(t, types.MigrationTypeTenantMigration, migrations[1].MigrationType) - assert.Equal(t, dev2.File, migrations[2].File) - assert.Equal(t, types.MigrationTypeTenantMigration, migrations[2].MigrationType) + assert.Equal(t, dev1.File, migrations[0].File) + assert.Equal(t, dev1p1.File, migrations[1].File) + assert.Equal(t, dev1p2.File, migrations[2].File) } -func TestComputeMigrationsToApplyDifferentTimestamps(t *testing.T) { - // use case: - // development done in parallel, 2 devs fork from master - // dev1 adds migrations on Monday - // dev2 adds migrations on Tuesday - // dev2 merges and deploys his code on Tuesday - // dev1 merges and deploys his code on Wednesday - // 
migrator should detect dev1 migrations - // previous implementation relied only on counts and such migration was not applied - +func TestFilterTenantMigrations(t *testing.T) { mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantMigration} mdef2 := types.Migration{Name: "20181111", SourceDir: "public", File: "public/20181111", MigrationType: types.MigrationTypeSingleMigration} mdef3 := types.Migration{Name: "20181112", SourceDir: "public", File: "public/20181112", MigrationType: types.MigrationTypeSingleMigration} @@ -147,15 +164,29 @@ func TestComputeMigrationsToApplyDifferentTimestamps(t *testing.T) { dev2 := types.Migration{Name: "20181120", SourceDir: "tenants", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantMigration} dev2p := types.Migration{Name: "20181120", SourceDir: "public", File: "public/20181120", MigrationType: types.MigrationTypeSingleMigration} - diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, dev2, dev2p} - dbMigrations := []types.MigrationDB{{Migration: mdef1, Schema: "abc", Created: time.Now()}, {Migration: mdef1, Schema: "def", Created: time.Now()}, {Migration: mdef2, Schema: "public", Created: time.Now()}, {Migration: mdef3, Schema: "public", Created: time.Now()}, {Migration: dev2, Schema: "abc", Created: time.Now()}, {Migration: dev2, Schema: "def", Created: time.Now()}, {Migration: dev2p, Schema: "public", Created: time.Now()}} - migrations := ComputeMigrationsToApply(context.TODO(), diskMigrations, dbMigrations) + script := types.Migration{Name: "20181120", SourceDir: "tenants-script", File: "tenants/20181120", MigrationType: types.MigrationTypeTenantScript} + scriptp := types.Migration{Name: "20181120", SourceDir: "public-script", File: "public/20181120", MigrationType: types.MigrationTypeSingleScript} - assert.Len(t, migrations, 3) + diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, 
dev2, dev2p, script, scriptp} - assert.Equal(t, dev1.File, migrations[0].File) - assert.Equal(t, dev1p1.File, migrations[1].File) - assert.Equal(t, dev1p2.File, migrations[2].File) + coordinator := &coordinator{ + ctx: context.TODO(), + connector: newMockedConnector(context.TODO(), nil), + loader: newMockedDiskLoader(context.TODO(), nil), + notifier: newMockedNotifier(context.TODO(), nil), + } + migrations := coordinator.filterTenantMigrations(diskMigrations) + + assert.Len(t, migrations, 4) + + assert.Equal(t, mdef1.File, migrations[0].File) + assert.Equal(t, types.MigrationTypeTenantMigration, migrations[0].MigrationType) + assert.Equal(t, dev1.File, migrations[1].File) + assert.Equal(t, types.MigrationTypeTenantMigration, migrations[1].MigrationType) + assert.Equal(t, dev2.File, migrations[2].File) + assert.Equal(t, types.MigrationTypeTenantMigration, migrations[2].MigrationType) + assert.Equal(t, script.File, migrations[3].File) + assert.Equal(t, types.MigrationTypeTenantScript, migrations[3].MigrationType) } func TestIntersect(t *testing.T) { @@ -173,26 +204,50 @@ func TestIntersect(t *testing.T) { diskMigrations := []types.Migration{mdef1, mdef2, mdef3, dev1, dev1p1, dev1p2, dev2, dev2p} dbMigrations := []types.Migration{mdef1, mdef2, mdef3, dev2, dev2p} - intersect := intersect(diskMigrations, dbMigrations) + coordinator := &coordinator{} + intersect := coordinator.intersect(diskMigrations, dbMigrations) assert.Len(t, intersect, 5) for i := range intersect { - assert.Equal(t, intersect[i].disk, intersect[i].db) - assert.Equal(t, intersect[i].disk, dbMigrations[i]) + assert.Equal(t, intersect[i].source, intersect[i].applied) + assert.Equal(t, intersect[i].source, dbMigrations[i]) } } -func TestVerifyCheckSumsOK(t *testing.T) { - mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantMigration, CheckSum: "abc"} - mdef2 := types.Migration{Name: "20181111", SourceDir: "tenants", File: 
"tenants/20181111", MigrationType: types.MigrationTypeSingleMigration, CheckSum: "abc"} - verified, offendingMigrations := VerifyCheckSums([]types.Migration{mdef1}, []types.MigrationDB{{Migration: mdef2}}) +func TestVerifySourceMigrationsCheckSumsOK(t *testing.T) { + coordinator := New(context.TODO(), nil, newMockedConnector, newMockedDiskLoader, newMockedNotifier) + defer coordinator.Dispose() + verified, offendingMigrations := coordinator.VerifySourceMigrationsCheckSums() assert.True(t, verified) assert.Empty(t, offendingMigrations) } -func TestVerifyCheckSumsKO(t *testing.T) { - mdef1 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeTenantMigration, CheckSum: "abc"} - mdef2 := types.Migration{Name: "20181111", SourceDir: "tenants", File: "tenants/20181111", MigrationType: types.MigrationTypeSingleMigration, CheckSum: "abcd"} - verified, offendingMigrations := VerifyCheckSums([]types.Migration{mdef1}, []types.MigrationDB{{Migration: mdef2}}) +func TestVerifySourceMigrationsCheckSumsKO(t *testing.T) { + coordinator := New(context.TODO(), nil, newMockedConnector, newBrokenCheckSumMockedDiskLoader, newMockedNotifier) + defer coordinator.Dispose() + verified, offendingMigrations := coordinator.VerifySourceMigrationsCheckSums() assert.False(t, verified) - assert.Equal(t, mdef1, offendingMigrations[0]) + assert.Equal(t, coordinator.GetSourceMigrations()[0], offendingMigrations[0]) +} + +func TestApplyMigrations(t *testing.T) { + coordinator := New(context.TODO(), nil, newMockedConnector, newMockedDiskLoader, newMockedNotifier) + defer coordinator.Dispose() + _, appliedMigrations := coordinator.ApplyMigrations(types.ModeTypeApply) + assert.Len(t, appliedMigrations, 1) + assert.Equal(t, coordinator.GetSourceMigrations()[1], appliedMigrations[0]) +} + +func TestAddTenantAndApplyMigrations(t *testing.T) { + coordinator := New(context.TODO(), nil, newMockedConnector, newMockedDiskLoader, 
newMockedNotifier) + defer coordinator.Dispose() + _, appliedMigrations := coordinator.AddTenantAndApplyMigrations(types.ModeTypeApply, "new") + assert.Len(t, appliedMigrations, 1) + assert.Equal(t, coordinator.GetSourceMigrations()[1], appliedMigrations[0]) +} + +func TestGetTenants(t *testing.T) { + coordinator := New(context.TODO(), nil, newMockedConnector, newMockedDiskLoader, newMockedNotifier) + defer coordinator.Dispose() + tenants := coordinator.GetTenants() + assert.Equal(t, []string{"a", "b", "c"}, tenants) } diff --git a/db/db.go b/db/db.go index 2e50797..5cf72ef 100644 --- a/db/db.go +++ b/db/db.go @@ -4,6 +4,7 @@ import ( "context" "database/sql" "fmt" + "path/filepath" "strings" "time" @@ -14,29 +15,31 @@ import ( // Connector interface abstracts all DB operations performed by migrator type Connector interface { - Init() error - GetTenants() ([]string, error) - GetDBMigrations() ([]types.MigrationDB, error) - ApplyMigrations(context.Context, []types.Migration) error - AddTenantAndApplyMigrations(context.Context, string, []types.Migration) error + GetTenants() []string + GetAppliedMigrations() []types.MigrationDB + ApplyMigrations(types.MigrationsModeType, []types.Migration) *types.MigrationResults + AddTenantAndApplyMigrations(types.MigrationsModeType, string, []types.Migration) *types.MigrationResults Dispose() } // baseConnector struct is a base struct for implementing DB specific dialects type baseConnector struct { + ctx context.Context config *config.Config dialect dialect db *sql.DB } -// NewConnector constructs Connector instance based on the passed Config -func NewConnector(config *config.Config) (Connector, error) { - dialect, err := newDialect(config) - if err != nil { - return nil, err - } - connector := &baseConnector{config, dialect, nil} - return connector, nil +// Factory is a factory method for creating Loader instance +type Factory func(context.Context, *config.Config) Connector + +// New constructs Connector instance based on the 
passed Config +func New(ctx context.Context, config *config.Config) Connector { + dialect := newDialect(config) + connector := &baseConnector{ctx, config, dialect, nil} + connector.connect() + connector.init() + return connector } const ( @@ -46,48 +49,49 @@ const ( defaultSchemaPlaceHolder = "{schema}" ) -// Init initialises connector by opening a connection to database -func (bc *baseConnector) Init() error { +// connect connects to a database +func (bc *baseConnector) connect() { db, err := sql.Open(bc.config.Driver, bc.config.DataSource) if err != nil { - return err + panic(fmt.Sprintf("Failed to open connection to DB: %v", err.Error())) } - return bc.doInit(db) + bc.db = db } -// Init initialises connector by opening a connection to database -func (bc *baseConnector) doInit(db *sql.DB) error { - if err := db.Ping(); err != nil { - return fmt.Errorf("Failed to connect to database: %v", err) +// init initialises migrator by making sure proper schema/table are created +func (bc *baseConnector) init() { + if err := bc.db.Ping(); err != nil { + panic(fmt.Sprintf("Failed to connect to database: %v", err)) } - bc.db = db tx, err := bc.db.Begin() if err != nil { - return fmt.Errorf("Could not start DB transaction: %v", err) + panic(fmt.Sprintf("Could not start DB transaction: %v", err)) } // make sure migrator schema exists createSchema := bc.dialect.GetCreateSchemaSQL(migratorSchema) if _, err := bc.db.Query(createSchema); err != nil { - return fmt.Errorf("Could not create migrator schema: %v", err) + panic(fmt.Sprintf("Could not create migrator schema: %v", err)) } // make sure migrations table exists createMigrationsTable := bc.dialect.GetCreateMigrationsTableSQL() if _, err := bc.db.Query(createMigrationsTable); err != nil { - return fmt.Errorf("Could not create migrations table: %v", err) + panic(fmt.Sprintf("Could not create migrations table: %v", err)) } // if using default migrator tenants table make sure it exists if bc.config.TenantSelectSQL == "" { 
createTenantsTable := bc.dialect.GetCreateTenantsTableSQL() if _, err := bc.db.Query(createTenantsTable); err != nil { - return fmt.Errorf("Could not create default tenants table: %v", err) + panic(fmt.Sprintf("Could not create default tenants table: %v", err)) } } - return tx.Commit() + if err := tx.Commit(); err != nil { + panic(fmt.Sprintf("Could not commit transaction: %v", err)) + } } // Dispose closes all resources allocated by connector @@ -109,38 +113,36 @@ func (bc *baseConnector) getTenantSelectSQL() string { } // GetTenants returns a list of all DB tenants -func (bc *baseConnector) GetTenants() (tenants []string, err error) { +func (bc *baseConnector) GetTenants() []string { tenantSelectSQL := bc.getTenantSelectSQL() - tenants = []string{} + tenants := []string{} rows, err := bc.db.Query(tenantSelectSQL) if err != nil { - err = fmt.Errorf("Could not query tenants: %v", err) - return + panic(fmt.Sprintf("Could not query tenants: %v", err)) } for rows.Next() { var name string if err = rows.Scan(&name); err != nil { - err = fmt.Errorf("Could not read tenants: %v", err) - return + panic(fmt.Sprintf("Could not read tenants: %v", err)) } tenants = append(tenants, name) } - return + + return tenants } -// GetDBMigrations returns a list of all applied DB migrations -func (bc *baseConnector) GetDBMigrations() (dbMigrations []types.MigrationDB, err error) { +// GetAppliedMigrations returns a list of all applied DB migrations +func (bc *baseConnector) GetAppliedMigrations() []types.MigrationDB { query := bc.dialect.GetMigrationSelectSQL() - dbMigrations = []types.MigrationDB{} + dbMigrations := []types.MigrationDB{} rows, err := bc.db.Query(query) if err != nil { - err = fmt.Errorf("Could not query DB migrations: %v", err) - return + panic(fmt.Sprintf("Could not query DB migrations: %v", err.Error())) } for rows.Next() { @@ -150,98 +152,103 @@ func (bc *baseConnector) GetDBMigrations() (dbMigrations []types.MigrationDB, er filename string migrationType 
types.MigrationType schema string - created time.Time + appliedAt time.Time contents string checksum string ) - if err = rows.Scan(&name, &sourceDir, &filename, &migrationType, &schema, &created, &contents, &checksum); err != nil { - err = fmt.Errorf("Could not read DB migration: %v", err) - return + if err = rows.Scan(&name, &sourceDir, &filename, &migrationType, &schema, &appliedAt, &contents, &checksum); err != nil { + panic(fmt.Sprintf("Could not read DB migration: %v", err.Error())) } mdef := types.Migration{Name: name, SourceDir: sourceDir, File: filename, MigrationType: migrationType, Contents: contents, CheckSum: checksum} - dbMigrations = append(dbMigrations, types.MigrationDB{Migration: mdef, Schema: schema, Created: created}) + dbMigrations = append(dbMigrations, types.MigrationDB{Migration: mdef, Schema: schema, AppliedAt: appliedAt}) } - - return + return dbMigrations } // ApplyMigrations applies passed migrations -func (bc *baseConnector) ApplyMigrations(ctx context.Context, migrations []types.Migration) (err error) { +func (bc *baseConnector) ApplyMigrations(mode types.MigrationsModeType, migrations []types.Migration) *types.MigrationResults { if len(migrations) == 0 { - return + return &types.MigrationResults{ + StartedAt: time.Now(), + Duration: 0, + } } - tenants, err := bc.GetTenants() - if err != nil { - return - } + tenants := bc.GetTenants() tx, err := bc.db.Begin() if err != nil { - return + panic(fmt.Sprintf("Could not start transaction: %v", err.Error())) } defer func() { r := recover() if r == nil { - err = tx.Commit() + if mode == types.ModeTypeDryRun { + common.LogInfo(bc.ctx, "Running in dry-run mode, calling rollback") + tx.Rollback() + } else { + common.LogInfo(bc.ctx, "Running in %v mode, committing transaction", mode) + if err := tx.Commit(); err != nil { + panic(fmt.Sprintf("Could not commit transaction: %v", err.Error())) + } + } } else { - common.LogInfo(ctx, "Recovered in ApplyMigrations. 
Transaction rollback.") + common.LogInfo(bc.ctx, "Recovered in ApplyMigrations. Transaction rollback.") tx.Rollback() - var ok bool - err, ok = r.(error) - if !ok { - err = fmt.Errorf("%v", r) - } + panic(r) } }() - bc.applyMigrationsInTx(ctx, tx, tenants, migrations) - return + return bc.applyMigrationsInTx(tx, mode, tenants, migrations) } // AddTenantAndApplyMigrations adds new tenant and applies all existing tenant migrations -func (bc *baseConnector) AddTenantAndApplyMigrations(ctx context.Context, tenant string, migrations []types.Migration) (err error) { +func (bc *baseConnector) AddTenantAndApplyMigrations(mode types.MigrationsModeType, tenant string, migrations []types.Migration) *types.MigrationResults { tenantInsertSQL := bc.getTenantInsertSQL() tx, err := bc.db.Begin() if err != nil { - return + panic(fmt.Sprintf("Could not start transaction: %v", err.Error())) } defer func() { r := recover() if r == nil { - err = tx.Commit() + if mode == types.ModeTypeDryRun { + common.LogInfo(bc.ctx, "Running in dry-run mode, calling rollback") + tx.Rollback() + } else { + common.LogInfo(bc.ctx, "Running in %v mode, committing transaction", mode) + if err := tx.Commit(); err != nil { + panic(fmt.Sprintf("Could not commit transaction: %v", err.Error())) + } + } } else { - common.LogInfo(ctx, "Recovered in AddTenantAndApplyMigrations. Transaction rollback.") + common.LogInfo(bc.ctx, "Recovered in AddTenantAndApplyMigrations. 
Transaction rollback.") tx.Rollback() - var ok bool - err, ok = r.(error) - if !ok { - err = fmt.Errorf("%v", r) - } + panic(r) } }() createSchema := bc.dialect.GetCreateSchemaSQL(tenant) if _, err = tx.Exec(createSchema); err != nil { - common.LogPanic(ctx, "Create schema failed, transaction rollback was called: %v", err) + panic(fmt.Sprintf("Create schema failed: %v", err)) } insert, err := bc.db.Prepare(tenantInsertSQL) if err != nil { - common.LogPanic(ctx, "Could not create prepared statement: %v", err) + panic(fmt.Sprintf("Could not create prepared statement: %v", err)) } _, err = tx.Stmt(insert).Exec(tenant) if err != nil { - common.LogPanic(ctx, "Failed to add tenant entry: %v", err) + panic(fmt.Sprintf("Failed to add tenant entry: %v", err)) } - bc.applyMigrationsInTx(ctx, tx, []string{tenant}, migrations) + results := bc.applyMigrationsInTx(tx, mode, []string{tenant}, migrations) - return + return results } // getTenantInsertSQL returns tenant insert SQL statement from configuration file @@ -258,7 +265,7 @@ func (bc *baseConnector) getTenantInsertSQL() string { return tenantInsertSQL } -// GetSchemaPlaceHolder returns a schema placeholder which is +// getSchemaPlaceHolder returns a schema placeholder which is // either the default one or overridden by user in config func (bc *baseConnector) getSchemaPlaceHolder() string { var schemaPlaceHolder string @@ -270,39 +277,66 @@ func (bc *baseConnector) getSchemaPlaceHolder() string { return schemaPlaceHolder } -func (bc *baseConnector) applyMigrationsInTx(ctx context.Context, tx *sql.Tx, tenants []string, migrations []types.Migration) { +func (bc *baseConnector) applyMigrationsInTx(tx *sql.Tx, mode types.MigrationsModeType, tenants []string, migrations []types.Migration) *types.MigrationResults { + + results := &types.MigrationResults{ + StartedAt: time.Now(), + Tenants: len(tenants), + } + + defer func() { + results.Duration = time.Now().Sub(results.StartedAt) + results.MigrationsGrandTotal = 
results.TenantMigrationsTotal + results.SingleMigrations + results.ScriptsGrandTotal = results.TenantScriptsTotal + results.SingleScripts + }() + schemaPlaceHolder := bc.getSchemaPlaceHolder() insertMigrationSQL := bc.dialect.GetMigrationInsertSQL() insert, err := bc.db.Prepare(insertMigrationSQL) if err != nil { - common.LogPanic(ctx, "Could not create prepared statement: %v", err) + panic(fmt.Sprintf("Could not create prepared statement: %v", err)) } for _, m := range migrations { var schemas []string - // TODO check if golang supports "in" if m.MigrationType == types.MigrationTypeTenantMigration || m.MigrationType == types.MigrationTypeTenantScript { schemas = tenants } else { - schemas = []string{m.SourceDir} + schemas = []string{filepath.Base(m.SourceDir)} } for _, s := range schemas { - common.LogInfo(ctx, "Applying migration type: %d, schema: %s, file: %s ", m.MigrationType, s, m.File) + common.LogInfo(bc.ctx, "Applying migration type: %d, schema: %s, file: %s ", m.MigrationType, s, m.File) - contents := strings.Replace(m.Contents, schemaPlaceHolder, s, -1) - - _, err = tx.Exec(contents) - if err != nil { - common.LogPanic(ctx, "SQL migration failed: %v", err) + if mode != types.ModeTypeSync { + contents := strings.Replace(m.Contents, schemaPlaceHolder, s, -1) + if _, err = tx.Exec(contents); err != nil { + panic(fmt.Sprintf("SQL migration %v failed with error: %v", m.File, err.Error())) + } } - _, err = tx.Stmt(insert).Exec(m.Name, m.SourceDir, m.File, m.MigrationType, s, m.Contents, m.CheckSum) - if err != nil { - common.LogPanic(ctx, "Failed to add migration entry: %v", err) + if _, err = tx.Stmt(insert).Exec(m.Name, m.SourceDir, m.File, m.MigrationType, s, m.Contents, m.CheckSum); err != nil { + panic(fmt.Sprintf("Failed to add migration entry: %v", err.Error())) } } + if m.MigrationType == types.MigrationTypeSingleMigration { + results.SingleMigrations++ + } + if m.MigrationType == types.MigrationTypeSingleScript { + results.SingleScripts++ + } + if 
m.MigrationType == types.MigrationTypeTenantMigration { + results.TenantMigrations++ + results.TenantMigrationsTotal += len(schemas) + } + if m.MigrationType == types.MigrationTypeTenantScript { + results.TenantScripts++ + results.TenantScriptsTotal += len(schemas) + } + } + + return results } diff --git a/db/db_dialect.go b/db/db_dialect.go index 372411a..dc9bc96 100644 --- a/db/db_dialect.go +++ b/db/db_dialect.go @@ -78,7 +78,7 @@ func (bd *baseDialect) GetCreateSchemaSQL(schema string) string { } // newDialect constructs dialect instance based on the passed Config -func newDialect(config *config.Config) (dialect, error) { +func newDialect(config *config.Config) dialect { var dialect dialect @@ -90,8 +90,8 @@ func newDialect(config *config.Config) (dialect, error) { case "postgres": dialect = &postgreSQLDialect{} default: - return nil, fmt.Errorf("Failed to create Connector: %q is an unknown driver", config.Driver) + panic(fmt.Sprintf("Failed to create Connector unknown driver: %v", config.Driver)) } - return dialect, nil + return dialect } diff --git a/db/db_dialect_test.go b/db/db_dialect_test.go new file mode 100644 index 0000000..03b7eef --- /dev/null +++ b/db/db_dialect_test.go @@ -0,0 +1,71 @@ +package db + +import ( + "testing" + + "github.com/lukaszbudnik/migrator/config" + "github.com/stretchr/testify/assert" +) + +func TestBaseDialectGetCreateTenantsTableSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "postgres" + + dialect := newDialect(config) + + createTenantsTableSQL := dialect.GetCreateTenantsTableSQL() + + expected := ` +create table if not exists migrator.migrator_tenants ( + id serial primary key, + name varchar(200) not null, + created timestamp default now() +) +` + + assert.Equal(t, expected, createTenantsTableSQL) +} + +func TestBaseDialectGetCreateMigrationsTableSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + 
config.Driver = "postgres" + + dialect := newDialect(config) + + createMigrationsTableSQL := dialect.GetCreateMigrationsTableSQL() + + expected := ` +create table if not exists migrator.migrator_migrations ( + id serial primary key, + name varchar(200) not null, + source_dir varchar(200) not null, + filename varchar(200) not null, + type int not null, + db_schema varchar(200) not null, + created timestamp default now(), + contents text, + checksum varchar(64) +) +` + + assert.Equal(t, expected, createMigrationsTableSQL) +} + +func TestBaseDialectGetCreateSchemaSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "postgres" + + dialect := newDialect(config) + + createSchemaSQL := dialect.GetCreateSchemaSQL("abc") + + expected := "create schema if not exists abc" + + assert.Equal(t, expected, createSchemaSQL) +} diff --git a/db/db_error_handling_test.go b/db/db_error_handling_test.go new file mode 100644 index 0000000..a278187 --- /dev/null +++ b/db/db_error_handling_test.go @@ -0,0 +1,452 @@ +package db + +import ( + "errors" + "fmt" + "testing" + "time" + + "github.com/DATA-DOG/go-sqlmock" + "github.com/lukaszbudnik/migrator/config" + "github.com/lukaszbudnik/migrator/types" + "github.com/stretchr/testify/assert" +) + +func TestInitCannotBeginTransactionError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "sqlmock" + connector := baseConnector{newTestContext(), config, nil, db} + + mock.ExpectBegin().WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not start DB transaction: trouble maker", func() { + connector.init() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestInitCannotCreateMigratorSchema(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver 
= "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin() + // don't have to provide full SQL here - patterns at work + mock.ExpectQuery("create schema").WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not create migrator schema: trouble maker", func() { + connector.init() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestInitCannotCreateMigratorMigrationsTable(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin() + // don't have to provide full SQL here - patterns at work + mock.ExpectQuery("create schema").WillReturnRows() + mock.ExpectQuery("create table").WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not create migrations table: trouble maker", func() { + connector.init() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestInitCannotCreateMigratorTenantsTable(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin() + // don't have to provide full SQL here - patterns at work + mock.ExpectQuery("create schema").WillReturnRows() + mock.ExpectQuery("create table").WillReturnRows() + mock.ExpectQuery("create table").WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not create default tenants table: trouble maker", func() { + connector.init() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were 
unfulfilled expectations: %s", err) + } +} + +func TestInitCannotCommitTransaction(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin() + // don't have to provide full SQL here - patterns at work + mock.ExpectQuery("create schema").WillReturnRows() + mock.ExpectQuery("create table").WillReturnRows() + mock.ExpectQuery("create table").WillReturnRows() + mock.ExpectCommit().WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not commit transaction: trouble maker", func() { + connector.init() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestGetTenantsError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + // don't have to provide full SQL here - patterns at work + mock.ExpectQuery("select").WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not query tenants: trouble maker", func() { + connector.GetTenants() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestGetAppliedMigrationsError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + // don't have to provide full SQL here - patterns at work + mock.ExpectQuery("select").WillReturnError(errors.New("trouble maker")) + + assert.PanicsWithValue(t, "Could not query DB migrations: trouble maker", func() { + 
connector.GetAppliedMigrations() + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestApplyTransactionBeginError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + rows := sqlmock.NewRows([]string{"name"}).AddRow("tenantname") + mock.ExpectQuery("select").WillReturnRows(rows) + mock.ExpectBegin().WillReturnError(errors.New("trouble maker tx.Begin()")) + + t1 := time.Now().UnixNano() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{tenant1} + + assert.PanicsWithValue(t, "Could not start transaction: trouble maker tx.Begin()", func() { + connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestApplyInsertMigrationPreparedStatementError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + tenants := sqlmock.NewRows([]string{"name"}).AddRow("tenantname") + mock.ExpectQuery("select").WillReturnRows(tenants) + mock.ExpectBegin() + mock.ExpectPrepare("insert into").WillReturnError(errors.New("trouble maker")) + mock.ExpectRollback() + + t1 := time.Now().UnixNano() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into 
{schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{tenant1} + + assert.PanicsWithValue(t, "Could not create prepared statement: trouble maker", func() { + connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestApplyMigrationSQLError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + tenants := sqlmock.NewRows([]string{"name"}).AddRow("tenantname") + mock.ExpectQuery("select").WillReturnRows(tenants) + mock.ExpectBegin() + mock.ExpectPrepare("insert into") + mock.ExpectExec("insert into").WillReturnError(errors.New("trouble maker")) + mock.ExpectRollback() + + t1 := time.Now().UnixNano() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{tenant1} + + assert.PanicsWithValue(t, fmt.Sprintf("SQL migration %v failed with error: trouble maker", tenant1.File), func() { + connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestApplyInsertMigrationError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + time := time.Now().UnixNano() + m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: 
types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{m} + + tenant := "tenantname" + tenants := sqlmock.NewRows([]string{"name"}).AddRow(tenant) + mock.ExpectQuery("select").WillReturnRows(tenants) + mock.ExpectBegin() + mock.ExpectPrepare("insert into") + mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnError(errors.New("trouble maker")) + mock.ExpectRollback() + + assert.PanicsWithValue(t, "Failed to add migration entry: trouble maker", func() { + connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestApplyMigrationsCommitError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + time := time.Now().UnixNano() + m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{m} + + tenant := "tenantname" + tenants := sqlmock.NewRows([]string{"name"}).AddRow(tenant) + mock.ExpectQuery("select").WillReturnRows(tenants) + mock.ExpectBegin() + mock.ExpectPrepare("insert into") + mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit().WillReturnError(errors.New("tx 
trouble maker")) + + assert.PanicsWithValue(t, "Could not commit transaction: tx trouble maker", func() { + connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestAddTenantTransactionBeginError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin().WillReturnError(errors.New("trouble maker tx.Begin()")) + + t1 := time.Now().UnixNano() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{tenant1} + + assert.PanicsWithValue(t, "Could not start transaction: trouble maker tx.Begin()", func() { + connector.AddTenantAndApplyMigrations(types.ModeTypeApply, "newtenant", migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestAddTenantAndApplyMigrationsCreateSchemaError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin() + mock.ExpectExec("create schema").WillReturnError(errors.New("trouble maker")) + mock.ExpectRollback() + + t1 := time.Now().UnixNano() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := 
[]types.Migration{tenant1} + + assert.PanicsWithValue(t, "Create schema failed: trouble maker", func() { + connector.AddTenantAndApplyMigrations(types.ModeTypeApply, "newtenant", migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestAddTenantAndApplyMigrationsInsertTenantPreparedStatementError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + mock.ExpectBegin() + mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into").WillReturnError(errors.New("trouble maker")) + mock.ExpectRollback() + + t1 := time.Now().UnixNano() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{tenant1} + + assert.PanicsWithValue(t, "Could not create prepared statement: trouble maker", func() { + connector.AddTenantAndApplyMigrations(types.ModeTypeApply, "newtenant", migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestAddTenantAndApplyMigrationsInsertTenantError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + tenant := "tenant" + + mock.ExpectBegin() + mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into") + mock.ExpectPrepare("insert 
into").ExpectExec().WithArgs(tenant).WillReturnError(errors.New("trouble maker")) + mock.ExpectRollback() + + t1 := time.Now().UnixNano() + m1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{m1} + + assert.PanicsWithValue(t, "Failed to add tenant entry: trouble maker", func() { + connector.AddTenantAndApplyMigrations(types.ModeTypeApply, tenant, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} + +func TestAddTenantAndApplyMigrationsCommitError(t *testing.T) { + db, mock, err := sqlmock.New() + assert.Nil(t, err) + + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} + + time := time.Now().UnixNano() + m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{m} + + tenant := "tenantname" + mock.ExpectBegin() + mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into") + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(tenant).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into") + mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit().WillReturnError(errors.New("tx trouble maker")) + + assert.PanicsWithValue(t, "Could not commit 
transaction: tx trouble maker", func() { + connector.AddTenantAndApplyMigrations(types.ModeTypeApply, tenant, migrationsToApply) + }) + + if err := mock.ExpectationsWereMet(); err != nil { + t.Errorf("there were unfulfilled expectations: %s", err) + } +} diff --git a/db/db_mssql_test.go b/db/db_mssql_test.go new file mode 100644 index 0000000..0f985a3 --- /dev/null +++ b/db/db_mssql_test.go @@ -0,0 +1,116 @@ +package db + +import ( + "testing" + + "github.com/lukaszbudnik/migrator/config" + "github.com/stretchr/testify/assert" +) + +func TestDBCreateDialectMSSQLDriver(t *testing.T) { + config := &config.Config{} + config.Driver = "sqlserver" + dialect := newDialect(config) + assert.IsType(t, &msSQLDialect{}, dialect) +} + +func TestMSSQLGetMigrationInsertSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "sqlserver" + + dialect := newDialect(config) + + insertMigrationSQL := dialect.GetMigrationInsertSQL() + + assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values (@p1, @p2, @p3, @p4, @p5, @p6, @p7)", insertMigrationSQL) +} + +func TestMSSQLGetTenantInsertSQLDefault(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "sqlserver" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() + + tenantInsertSQL := connector.getTenantInsertSQL() + + assert.Equal(t, "insert into migrator.migrator_tenants (name) values (@p1)", tenantInsertSQL) +} + +func TestMSSQLDialectGetCreateTenantsTableSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "sqlserver" + + dialect := newDialect(config) + + createTenantsTableSQL := dialect.GetCreateTenantsTableSQL() + + expected := ` +IF NOT EXISTS (select * from information_schema.tables where 
table_schema = 'migrator' and table_name = 'migrator_tenants') +BEGIN + create table [migrator].migrator_tenants ( + id int identity (1,1) primary key, + name varchar(200) not null, + created datetime default CURRENT_TIMESTAMP + ); +END +` + + assert.Equal(t, expected, createTenantsTableSQL) +} + +func TestMSSQLDialectGetCreateMigrationsTableSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "sqlserver" + + dialect := newDialect(config) + + createMigrationsTableSQL := dialect.GetCreateMigrationsTableSQL() + + expected := ` +IF NOT EXISTS (select * from information_schema.tables where table_schema = 'migrator' and table_name = 'migrator_migrations') +BEGIN + create table [migrator].migrator_migrations ( + id int identity (1,1) primary key, + name varchar(200) not null, + source_dir varchar(200) not null, + filename varchar(200) not null, + type int not null, + db_schema varchar(200) not null, + created datetime default CURRENT_TIMESTAMP, + contents text, + checksum varchar(64) + ); +END +` + + assert.Equal(t, expected, createMigrationsTableSQL) +} + +func TestMSSQLDialectGetCreateSchemaSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "sqlserver" + + dialect := newDialect(config) + + createSchemaSQL := dialect.GetCreateSchemaSQL("def") + + expected := ` +IF NOT EXISTS (select * from information_schema.schemata where schema_name = 'def') +BEGIN + EXEC sp_executesql N'create schema def'; +END +` + + assert.Equal(t, expected, createSchemaSQL) +} diff --git a/db/db_mysql_test.go b/db/db_mysql_test.go new file mode 100644 index 0000000..37e49dd --- /dev/null +++ b/db/db_mysql_test.go @@ -0,0 +1,42 @@ +package db + +import ( + "testing" + + "github.com/lukaszbudnik/migrator/config" + "github.com/stretchr/testify/assert" +) + +func TestDBCreateDialectMysqlDriver(t *testing.T) { + config := &config.Config{} + config.Driver = "mysql" + 
dialect := newDialect(config) + assert.IsType(t, &mySQLDialect{}, dialect) +} + +func TestMySQLGetMigrationInsertSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "mysql" + + dialect := newDialect(config) + + insertMigrationSQL := dialect.GetMigrationInsertSQL() + + assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values (?, ?, ?, ?, ?, ?, ?)", insertMigrationSQL) +} + +func TestMySQLGetTenantInsertSQLDefault(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "mysql" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() + + tenantInsertSQL := connector.getTenantInsertSQL() + + assert.Equal(t, "insert into migrator.migrator_tenants (name) values (?)", tenantInsertSQL) +} diff --git a/db/db_postgresql_test.go b/db/db_postgresql_test.go new file mode 100644 index 0000000..1d3ffad --- /dev/null +++ b/db/db_postgresql_test.go @@ -0,0 +1,42 @@ +package db + +import ( + "testing" + + "github.com/lukaszbudnik/migrator/config" + "github.com/stretchr/testify/assert" +) + +func TestDBCreateDialectPostgreSQLDriver(t *testing.T) { + config := &config.Config{} + config.Driver = "postgres" + dialect := newDialect(config) + assert.IsType(t, &postgreSQLDialect{}, dialect) +} + +func TestPostgreSQLGetMigrationInsertSQL(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "postgres" + + dialect := newDialect(config) + + insertMigrationSQL := dialect.GetMigrationInsertSQL() + + assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values ($1, $2, $3, $4, $5, $6, $7)", insertMigrationSQL) +} + +func TestPostgreSQLGetTenantInsertSQLDefault(t *testing.T) { + config, err := 
config.FromFile("../test/migrator.yaml") + assert.Nil(t, err) + + config.Driver = "postgres" + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() + + tenantInsertSQL := connector.getTenantInsertSQL() + + assert.Equal(t, "insert into migrator.migrator_tenants (name) values ($1)", tenantInsertSQL) +} diff --git a/db/db_test.go b/db/db_test.go index dd76b95..af4a30c 100644 --- a/db/db_test.go +++ b/db/db_test.go @@ -2,27 +2,21 @@ package db import ( "context" - "errors" "fmt" - "runtime" "strings" "testing" "time" + sqlmock "github.com/DATA-DOG/go-sqlmock" "github.com/lukaszbudnik/migrator/common" "github.com/lukaszbudnik/migrator/config" "github.com/lukaszbudnik/migrator/types" "github.com/stretchr/testify/assert" - "gopkg.in/DATA-DOG/go-sqlmock.v2" ) func newTestContext() context.Context { - pc, _, _, _ := runtime.Caller(1) - details := runtime.FuncForPC(pc) - ctx := context.TODO() - ctx = context.WithValue(ctx, common.RequestIDKey{}, "123") - ctx = context.WithValue(ctx, common.ActionKey{}, strings.Replace(details.Name(), "github.com/lukaszbudnik/migrator/db.", "", -1)) + ctx = context.WithValue(ctx, common.RequestIDKey{}, time.Now().Nanosecond()) return ctx } @@ -30,91 +24,60 @@ func TestDBCreateConnectorPanicUnknownDriver(t *testing.T) { config := &config.Config{} config.Driver = "abcxyz" - _, err := NewConnector(config) - assert.Contains(t, err.Error(), "unknown driver") -} - -func TestBaseConnectorPanicUnknownDriver(t *testing.T) { - config := &config.Config{} - config.Driver = "sfsdf" - connector := baseConnector{config, nil, nil} - err := connector.Init() - assert.Contains(t, err.Error(), "unknown driver") + assert.PanicsWithValue(t, "Failed to create Connector unknown driver: abcxyz", func() { + New(newTestContext(), config) + }) } -func TestDBCreateDialectPostgreSQLDriver(t *testing.T) { - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) 
+func TestConnectorInitPanicConnectionError(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - assert.IsType(t, &postgreSQLDialect{}, dialect) -} -func TestDBCreateDialectMysqlDriver(t *testing.T) { - config := &config.Config{} - config.Driver = "mysql" - dialect, err := newDialect(config) - assert.Nil(t, err) - assert.IsType(t, &mySQLDialect{}, dialect) -} + config.DataSource = strings.Replace(config.DataSource, "127.0.0.1", "1.0.0.1", -1) -func TestDBCreateDialectMSSQLDriver(t *testing.T) { - config := &config.Config{} - config.Driver = "sqlserver" - dialect, err := newDialect(config) - assert.Nil(t, err) - assert.IsType(t, &msSQLDialect{}, dialect) -} + didPanic := false + var message interface{} + func() { -func TestDBConnectorInitPanicConnectionError(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() - config.DataSource = strings.Replace(config.DataSource, "127.0.0.1", "1.0.0.1", -1) + New(newTestContext(), config) - connector, err := NewConnector(config) - assert.Nil(t, err) - err = connector.Init() - assert.Contains(t, err.Error(), "Failed to connect to database") + }() + assert.True(t, didPanic) + assert.Contains(t, message, "Failed to connect to database") } -func TestDBGetTenants(t *testing.T) { +func TestGetTenants(t *testing.T) { config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - connector, err := NewConnector(config) - assert.Nil(t, err) - - err = connector.Init() - assert.Nil(t, err) + connector := New(newTestContext(), config) defer connector.Dispose() - tenants, err := connector.GetTenants() + tenants := connector.GetTenants() - assert.Nil(t, err) assert.True(t, len(tenants) >= 3) assert.Contains(t, tenants, "abc") assert.Contains(t, tenants, "def") assert.Contains(t, tenants, "xyz") } -func TestDBApplyMigrations(t *testing.T) { +func 
TestApplyMigrations(t *testing.T) { config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - connector, err := NewConnector(config) - assert.Nil(t, err) - connector.Init() + connector := New(newTestContext(), config) defer connector.Dispose() - tenants, err := connector.GetTenants() - assert.Nil(t, err) - + tenants := connector.GetTenants() noOfTenants := len(tenants) - dbMigrationsBefore, err := connector.GetDBMigrations() - assert.Nil(t, err) - + dbMigrationsBefore := connector.GetAppliedMigrations() lenBefore := len(dbMigrationsBefore) p1 := time.Now().UnixNano() @@ -146,11 +109,19 @@ func TestDBApplyMigrations(t *testing.T) { migrationsToApply := []types.Migration{public1, public2, public3, tenant1, tenant2, tenant3, public4, public5, tenant4} - connector.ApplyMigrations(newTestContext(), migrationsToApply) + results := connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) - dbMigrationsAfter, err := connector.GetDBMigrations() - assert.Nil(t, err) + assert.Equal(t, noOfTenants, results.Tenants) + assert.Equal(t, 3, results.SingleMigrations) + assert.Equal(t, 2, results.SingleScripts) + assert.Equal(t, 3, results.TenantMigrations) + assert.Equal(t, 1, results.TenantScripts) + assert.Equal(t, noOfTenants*3, results.TenantMigrationsTotal) + assert.Equal(t, noOfTenants*1, results.TenantScriptsTotal) + assert.Equal(t, noOfTenants*3+3, results.MigrationsGrandTotal) + assert.Equal(t, noOfTenants*1+2, results.ScriptsGrandTotal) + dbMigrationsAfter := connector.GetAppliedMigrations() lenAfter := len(dbMigrationsAfter) // 3 tenant migrations * no of tenants + 3 public @@ -159,601 +130,63 @@ func TestDBApplyMigrations(t *testing.T) { assert.Equal(t, expected, lenAfter-lenBefore) } -func TestDBApplyMigrationsEmptyMigrationArray(t *testing.T) { +func TestApplyMigrationsEmptyMigrationArray(t *testing.T) { config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - connector, err := NewConnector(config) - assert.Nil(t, err) - 
connector.Init() + connector := New(newTestContext(), config) defer connector.Dispose() - dbMigrationsBefore, err := connector.GetDBMigrations() - assert.Nil(t, err) - - lenBefore := len(dbMigrationsBefore) - migrationsToApply := []types.Migration{} - connector.ApplyMigrations(newTestContext(), migrationsToApply) - - dbMigrationsAfter, err := connector.GetDBMigrations() - assert.Nil(t, err) - - lenAfter := len(dbMigrationsAfter) - - assert.Equal(t, lenAfter, lenBefore) -} - -func TestGetTenantsSQLDefault(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - tenantSelectSQL := connector.getTenantSelectSQL() - - assert.Equal(t, "select name from migrator.migrator_tenants", tenantSelectSQL) -} - -func TestGetTenantsSQLOverride(t *testing.T) { - config, err := config.FromFile("../test/migrator-overrides.yaml") - assert.Nil(t, err) - - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - tenantSelectSQL := connector.getTenantSelectSQL() - - assert.Equal(t, "select somename from someschema.sometable", tenantSelectSQL) -} - -func TestGetSchemaPlaceHolderDefault(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - placeholder := connector.getSchemaPlaceHolder() - - assert.Equal(t, "{schema}", placeholder) -} - -func TestGetSchemaPlaceHolderOverride(t *testing.T) { - config, err := config.FromFile("../test/migrator-overrides.yaml") - assert.Nil(t, err) - - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - placeholder := 
connector.getSchemaPlaceHolder() - - assert.Equal(t, "[schema]", placeholder) -} - -func TestAddTenantAndApplyMigrations(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - connector.Init() - defer connector.Dispose() - - dbMigrationsBefore, err := connector.GetDBMigrations() - assert.Nil(t, err) - - lenBefore := len(dbMigrationsBefore) - - t1 := time.Now().UnixNano() - t2 := time.Now().UnixNano() - t3 := time.Now().UnixNano() - - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "drop table if exists {schema}.settings"} - tenant2 := types.Migration{Name: fmt.Sprintf("%v.sql", t2), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantMigration, Contents: "create table {schema}.settings (k int, v text)"} - tenant3 := types.Migration{Name: fmt.Sprintf("%v.sql", t3), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t3), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456')"} - - migrationsToApply := []types.Migration{tenant1, tenant2, tenant3} - - uniqueTenant := fmt.Sprintf("new_test_tenant_%v", time.Now().UnixNano()) - - connector.AddTenantAndApplyMigrations(newTestContext(), uniqueTenant, migrationsToApply) - - dbMigrationsAfter, err := connector.GetDBMigrations() - assert.Nil(t, err) - - lenAfter := len(dbMigrationsAfter) - - assert.Equal(t, 3, lenAfter-lenBefore) -} - -func TestMySQLGetMigrationInsertSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "mysql" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - insertMigrationSQL := dialect.GetMigrationInsertSQL() - - assert.Equal(t, 
"insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values (?, ?, ?, ?, ?, ?, ?)", insertMigrationSQL) -} - -func TestPostgreSQLGetMigrationInsertSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "postgres" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - insertMigrationSQL := dialect.GetMigrationInsertSQL() - - assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values ($1, $2, $3, $4, $5, $6, $7)", insertMigrationSQL) -} - -func TestMSSQLGetMigrationInsertSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "sqlserver" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - insertMigrationSQL := dialect.GetMigrationInsertSQL() - - assert.Equal(t, "insert into migrator.migrator_migrations (name, source_dir, filename, type, db_schema, contents, checksum) values (@p1, @p2, @p3, @p4, @p5, @p6, @p7)", insertMigrationSQL) -} - -func TestMySQLGetTenantInsertSQLDefault(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "mysql" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - tenantInsertSQL := connector.getTenantInsertSQL() - - assert.Equal(t, "insert into migrator.migrator_tenants (name) values (?)", tenantInsertSQL) -} - -func TestPostgreSQLGetTenantInsertSQLDefault(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - tenantInsertSQL := connector.getTenantInsertSQL() - - assert.Equal(t, "insert into migrator.migrator_tenants 
(name) values ($1)", tenantInsertSQL) -} - -func TestMSSQLGetTenantInsertSQLDefault(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "sqlserver" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - tenantInsertSQL := connector.getTenantInsertSQL() - - assert.Equal(t, "insert into migrator.migrator_tenants (name) values (@p1)", tenantInsertSQL) -} - -func TestGetTenantInsertSQLOverride(t *testing.T) { - config, err := config.FromFile("../test/migrator-overrides.yaml") - assert.Nil(t, err) - - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - defer connector.Dispose() - - tenantInsertSQL := connector.getTenantInsertSQL() + results := connector.ApplyMigrations(types.ModeTypeApply, migrationsToApply) - assert.Equal(t, "insert into someschema.sometable (somename) values ($1)", tenantInsertSQL) + assert.Equal(t, 0, results.MigrationsGrandTotal) + assert.Equal(t, 0, results.ScriptsGrandTotal) } -func TestMSSQLDialectGetCreateTenantsTableSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "sqlserver" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - createTenantsTableSQL := dialect.GetCreateTenantsTableSQL() - - expected := ` -IF NOT EXISTS (select * from information_schema.tables where table_schema = 'migrator' and table_name = 'migrator_tenants') -BEGIN - create table [migrator].migrator_tenants ( - id int identity (1,1) primary key, - name varchar(200) not null, - created datetime default CURRENT_TIMESTAMP - ); -END -` - - assert.Equal(t, expected, createTenantsTableSQL) -} - -func TestMSSQLDialectGetCreateMigrationsTableSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "sqlserver" - - dialect, err := 
newDialect(config) - assert.Nil(t, err) - - createMigrationsTableSQL := dialect.GetCreateMigrationsTableSQL() - - expected := ` -IF NOT EXISTS (select * from information_schema.tables where table_schema = 'migrator' and table_name = 'migrator_migrations') -BEGIN - create table [migrator].migrator_migrations ( - id int identity (1,1) primary key, - name varchar(200) not null, - source_dir varchar(200) not null, - filename varchar(200) not null, - type int not null, - db_schema varchar(200) not null, - created datetime default CURRENT_TIMESTAMP, - contents text, - checksum varchar(64) - ); -END -` - - assert.Equal(t, expected, createMigrationsTableSQL) -} - -func TestBaseDialectGetCreateTenantsTableSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "postgres" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - createTenantsTableSQL := dialect.GetCreateTenantsTableSQL() - - expected := ` -create table if not exists migrator.migrator_tenants ( - id serial primary key, - name varchar(200) not null, - created timestamp default now() -) -` - - assert.Equal(t, expected, createTenantsTableSQL) -} - -func TestBaseDialectGetCreateMigrationsTableSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "postgres" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - createMigrationsTableSQL := dialect.GetCreateMigrationsTableSQL() - - expected := ` -create table if not exists migrator.migrator_migrations ( - id serial primary key, - name varchar(200) not null, - source_dir varchar(200) not null, - filename varchar(200) not null, - type int not null, - db_schema varchar(200) not null, - created timestamp default now(), - contents text, - checksum varchar(64) -) -` - - assert.Equal(t, expected, createMigrationsTableSQL) -} - -func TestBaseDialectGetCreateSchemaSQL(t *testing.T) { - config, err := 
config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "postgres" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - createSchemaSQL := dialect.GetCreateSchemaSQL("abc") - - expected := "create schema if not exists abc" - - assert.Equal(t, expected, createSchemaSQL) -} - -func TestMSSQLDialectGetCreateSchemaSQL(t *testing.T) { - config, err := config.FromFile("../test/migrator.yaml") - assert.Nil(t, err) - - config.Driver = "sqlserver" - - dialect, err := newDialect(config) - assert.Nil(t, err) - - createSchemaSQL := dialect.GetCreateSchemaSQL("def") - - expected := ` -IF NOT EXISTS (select * from information_schema.schemata where schema_name = 'def') -BEGIN - EXEC sp_executesql N'create schema def'; -END -` - - assert.Equal(t, expected, createSchemaSQL) -} - -func TestDoInitCannotBeginTransactionError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "sqlmock" - connector := baseConnector{config, nil, nil} - - mock.ExpectBegin().WillReturnError(errors.New("trouble maker")) - - err = connector.doInit(db) - assert.Equal(t, "Could not start DB transaction: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestDoInitCannotCreateMigratorSchema(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - mock.ExpectBegin() - // don't have to provide full SQL here - patterns at work - mock.ExpectQuery("create schema").WillReturnError(errors.New("trouble maker")) - - err = connector.doInit(db) - assert.Equal(t, "Could not create migrator schema: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", 
err) - } -} - -func TestDoInitCannotCreateMigratorMigrationsTable(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - mock.ExpectBegin() - // don't have to provide full SQL here - patterns at work - mock.ExpectQuery("create schema").WillReturnRows() - mock.ExpectQuery("create table").WillReturnError(errors.New("trouble maker")) - - err = connector.doInit(db) - assert.Equal(t, "Could not create migrations table: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestDoInitCannotCreateMigratorTenantsTable(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - mock.ExpectBegin() - // don't have to provide full SQL here - patterns at work - mock.ExpectQuery("create schema").WillReturnRows() - mock.ExpectQuery("create table").WillReturnRows() - mock.ExpectQuery("create table").WillReturnError(errors.New("trouble maker")) - - err = connector.doInit(db) - assert.Equal(t, "Could not create default tenants table: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestDBGetTenantsError(t *testing.T) { +func TestApplyMigrationsDryRunMode(t *testing.T) { db, mock, err := sqlmock.New() assert.Nil(t, err) config := &config.Config{} config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - // don't have to provide full SQL here - patterns at work - mock.ExpectQuery("select").WillReturnError(errors.New("trouble 
maker")) - - connector.db = db - - _, err = connector.GetTenants() - assert.Equal(t, "Could not query tenants: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestDBGetMigrationsError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - // don't have to provide full SQL here - patterns at work - mock.ExpectQuery("select").WillReturnError(errors.New("trouble maker")) - - connector.db = db - - _, err = connector.GetDBMigrations() - assert.Equal(t, "Could not query DB migrations: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestApplyTransactionBeginError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - rows := sqlmock.NewRows([]string{"name"}).AddRow("tenantname") - mock.ExpectQuery("select").WillReturnRows(rows) - mock.ExpectBegin().WillReturnError(errors.New("trouble maker tx.Begin()")) - - connector.db = db - - t1 := time.Now().UnixNano() - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{tenant1} - - err = connector.ApplyMigrations(newTestContext(), migrationsToApply) - assert.NotNil(t, err) - assert.Equal(t, "trouble maker tx.Begin()", 
err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestApplyInsertMigrationPreparedStatementError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - tenants := sqlmock.NewRows([]string{"name"}).AddRow("tenantname") - mock.ExpectQuery("select").WillReturnRows(tenants) - mock.ExpectBegin() - mock.ExpectPrepare("insert into").WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() - - connector.db = db - - t1 := time.Now().UnixNano() - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{tenant1} - - err = connector.ApplyMigrations(newTestContext(), migrationsToApply) - assert.Equal(t, "Could not create prepared statement: trouble maker", err.Error()) - - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } -} - -func TestApplyMigrationSQLError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} + time := time.Now().UnixNano() + m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{m} - tenants := sqlmock.NewRows([]string{"name"}).AddRow("tenantname") + tenant := "tenantname" + tenants := 
sqlmock.NewRows([]string{"name"}).AddRow(tenant) mock.ExpectQuery("select").WillReturnRows(tenants) mock.ExpectBegin() mock.ExpectPrepare("insert into") - mock.ExpectExec("insert into").WillReturnError(errors.New("trouble maker")) + mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnResult(sqlmock.NewResult(1, 1)) + + // dry-run mode calls rollback instead of commit mock.ExpectRollback() - connector.db = db + // however the results contain correct dry-run data like number of applied migrations/scripts + results := connector.ApplyMigrations(types.ModeTypeDryRun, migrationsToApply) - t1 := time.Now().UnixNano() - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{tenant1} - - err = connector.ApplyMigrations(newTestContext(), migrationsToApply) - assert.Equal(t, "SQL migration failed: trouble maker", err.Error()) + assert.Equal(t, 1, results.MigrationsGrandTotal) if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } -func TestApplyInsertMigrationError(t *testing.T) { +func TestApplyMigrationsSyncMode(t *testing.T) { db, mock, err := sqlmock.New() assert.Nil(t, err) config := &config.Config{} config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} time := time.Now().UnixNano() m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: types.MigrationTypeTenantMigration, 
Contents: "insert into {schema}.settings values (456, '456') "} @@ -764,233 +197,175 @@ func TestApplyInsertMigrationError(t *testing.T) { mock.ExpectQuery("select").WillReturnRows(tenants) mock.ExpectBegin() mock.ExpectPrepare("insert into") - mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) - mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() - connector.db = db + // sync the results contain correct data like number of applied migrations/scripts + results := connector.ApplyMigrations(types.ModeTypeSync, migrationsToApply) - err = connector.ApplyMigrations(newTestContext(), migrationsToApply) - assert.Equal(t, "Failed to add migration entry: trouble maker", err.Error()) + assert.Equal(t, 1, results.MigrationsGrandTotal) if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } -func TestAddTenantTransactionBeginError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) +func TestGetTenantsSQLDefault(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - mock.ExpectBegin().WillReturnError(errors.New("trouble maker tx.Begin()")) - connector.db = db - - t1 := time.Now().UnixNano() - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := 
[]types.Migration{tenant1} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() - err = connector.AddTenantAndApplyMigrations(newTestContext(), "newtenant", migrationsToApply) - assert.NotNil(t, err) - assert.Equal(t, "trouble maker tx.Begin()", err.Error()) + tenantSelectSQL := connector.getTenantSelectSQL() - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } + assert.Equal(t, "select name from migrator.migrator_tenants", tenantSelectSQL) } -func TestAddTenantAndApplyMigrationsCreateSchemaError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) +func TestGetTenantsSQLOverride(t *testing.T) { + config, err := config.FromFile("../test/migrator-overrides.yaml") assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - mock.ExpectBegin() - mock.ExpectExec("create schema").WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() - connector.db = db - - t1 := time.Now().UnixNano() - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{tenant1} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() - err = connector.AddTenantAndApplyMigrations(newTestContext(), "newtenant", migrationsToApply) - assert.Equal(t, "Create schema failed, transaction rollback was called: trouble maker", err.Error()) + tenantSelectSQL := connector.getTenantSelectSQL() - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } + assert.Equal(t, "select somename from 
someschema.sometable", tenantSelectSQL) } -func TestAddTenantAndApplyMigrationsInsertTenantPreparedStatementError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) +func TestGetSchemaPlaceHolderDefault(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - mock.ExpectBegin() - mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) - mock.ExpectPrepare("insert into").WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() - - connector.db = db - - t1 := time.Now().UnixNano() - tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{tenant1} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() - err = connector.AddTenantAndApplyMigrations(newTestContext(), "newtenant", migrationsToApply) - assert.Equal(t, "Could not create prepared statement: trouble maker", err.Error()) + placeholder := connector.getSchemaPlaceHolder() - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } + assert.Equal(t, "{schema}", placeholder) } -func TestAddTenantAndApplyMigrationsInsertTenantError(t *testing.T) { - db, mock, err := sqlmock.New() - assert.Nil(t, err) - - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) +func TestGetSchemaPlaceHolderOverride(t *testing.T) { + config, err := config.FromFile("../test/migrator-overrides.yaml") assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} - - tenant := "tenant" - - mock.ExpectBegin() - 
mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) - mock.ExpectPrepare("insert into") - mock.ExpectPrepare("insert into").ExpectExec().WithArgs(tenant).WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() - - connector.db = db - t1 := time.Now().UnixNano() - m1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{m1} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() - err = connector.AddTenantAndApplyMigrations(newTestContext(), tenant, migrationsToApply) - assert.Equal(t, "Failed to add tenant entry: trouble maker", err.Error()) + placeholder := connector.getSchemaPlaceHolder() - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } + assert.Equal(t, "[schema]", placeholder) } -func TestAddTenantAndApplyMigrationInsertMigrationPreparedStatementError(t *testing.T) { - db, mock, err := sqlmock.New() +func TestAddTenantAndApplyMigrations(t *testing.T) { + config, err := config.FromFile("../test/migrator.yaml") assert.Nil(t, err) - config := &config.Config{} - config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} + connector := New(newTestContext(), config) + defer connector.Dispose() - tenant := "tenant" + t1 := time.Now().UnixNano() + t2 := time.Now().UnixNano() + t3 := time.Now().UnixNano() - mock.ExpectBegin() - mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) - mock.ExpectPrepare("insert into") - mock.ExpectPrepare("insert into").ExpectExec().WithArgs(tenant).WillReturnResult(sqlmock.NewResult(1, 1)) - mock.ExpectPrepare("insert 
into").WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() + tenant1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "drop table if exists {schema}.settings"} + tenant2 := types.Migration{Name: fmt.Sprintf("%v.sql", t2), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t2), MigrationType: types.MigrationTypeTenantMigration, Contents: "create table {schema}.settings (k int, v text)"} + tenant3 := types.Migration{Name: fmt.Sprintf("%v.sql", t3), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t3), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456')"} - connector.db = db + migrationsToApply := []types.Migration{tenant1, tenant2, tenant3} - t1 := time.Now().UnixNano() - m1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{m1} + uniqueTenant := fmt.Sprintf("new_test_tenant_%v", time.Now().UnixNano()) - err = connector.AddTenantAndApplyMigrations(newTestContext(), tenant, migrationsToApply) - assert.Equal(t, "Could not create prepared statement: trouble maker", err.Error()) + results := connector.AddTenantAndApplyMigrations(types.ModeTypeApply, uniqueTenant, migrationsToApply) - if err := mock.ExpectationsWereMet(); err != nil { - t.Errorf("there were unfulfilled expectations: %s", err) - } + // applied only for one tenant - the newly added one + assert.Equal(t, 1, results.Tenants) + // just one tenant so total number of tenant migrations is equal to tenant migrations + assert.Equal(t, 3, results.TenantMigrations) + assert.Equal(t, 3, results.TenantMigrationsTotal) } -func TestAddTenantAndApplyMigrationMigrationSQLError(t *testing.T) 
{ +func TestAddTenantAndApplyMigrationsDryRunMode(t *testing.T) { db, mock, err := sqlmock.New() assert.Nil(t, err) config := &config.Config{} config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} - tenant := "tenant" + time := time.Now().UnixNano() + m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} + migrationsToApply := []types.Migration{m} + tenant := "tenantname" mock.ExpectBegin() mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectPrepare("insert into") mock.ExpectPrepare("insert into").ExpectExec().WithArgs(tenant).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectPrepare("insert into") - mock.ExpectExec("insert into").WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() + mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnResult(sqlmock.NewResult(1, 1)) - connector.db = db + // dry-run mode calls rollback instead of commit + mock.ExpectRollback() - t1 := time.Now().UnixNano() - m1 := types.Migration{Name: fmt.Sprintf("%v.sql", t1), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", t1), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} - migrationsToApply := []types.Migration{m1} + // however the results contain correct dry-run data like number of applied migrations/scripts + results := connector.AddTenantAndApplyMigrations(types.ModeTypeDryRun, tenant, migrationsToApply) - err = 
connector.AddTenantAndApplyMigrations(newTestContext(), tenant, migrationsToApply) - assert.Equal(t, "SQL migration failed: trouble maker", err.Error()) + assert.Equal(t, 1, results.MigrationsGrandTotal) if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } -func TestAddTenantAndApplyMigrationInsertMigrationError(t *testing.T) { +func TestAddTenantAndApplyMigrationsSyncMode(t *testing.T) { db, mock, err := sqlmock.New() assert.Nil(t, err) config := &config.Config{} config.Driver = "postgres" - dialect, err := newDialect(config) - assert.Nil(t, err) - connector := baseConnector{config, dialect, nil} + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, db} - tenant := "tenant" time := time.Now().UnixNano() m := types.Migration{Name: fmt.Sprintf("%v.sql", time), SourceDir: "tenants", File: fmt.Sprintf("tenants/%v.sql", time), MigrationType: types.MigrationTypeTenantMigration, Contents: "insert into {schema}.settings values (456, '456') "} migrationsToApply := []types.Migration{m} + tenant := "tenantname" mock.ExpectBegin() mock.ExpectExec("create schema").WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectPrepare("insert into") mock.ExpectPrepare("insert into").ExpectExec().WithArgs(tenant).WillReturnResult(sqlmock.NewResult(1, 1)) mock.ExpectPrepare("insert into") - mock.ExpectExec("insert into").WillReturnResult(sqlmock.NewResult(1, 1)) - mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnError(errors.New("trouble maker")) - mock.ExpectRollback() + mock.ExpectPrepare("insert into").ExpectExec().WithArgs(m.Name, m.SourceDir, m.File, m.MigrationType, tenant, m.Contents, m.CheckSum).WillReturnResult(sqlmock.NewResult(1, 1)) + mock.ExpectCommit() - connector.db = db + // sync results contain correct data like number of applied migrations/scripts + results := 
connector.AddTenantAndApplyMigrations(types.ModeTypeSync, tenant, migrationsToApply) - err = connector.AddTenantAndApplyMigrations(newTestContext(), tenant, migrationsToApply) - assert.Equal(t, "Failed to add migration entry: trouble maker", err.Error()) + assert.Equal(t, 1, results.MigrationsGrandTotal) if err := mock.ExpectationsWereMet(); err != nil { t.Errorf("there were unfulfilled expectations: %s", err) } } + +func TestGetTenantInsertSQLOverride(t *testing.T) { + config, err := config.FromFile("../test/migrator-overrides.yaml") + assert.Nil(t, err) + + dialect := newDialect(config) + connector := baseConnector{newTestContext(), config, dialect, nil} + defer connector.Dispose() + + tenantInsertSQL := connector.getTenantInsertSQL() + + assert.Equal(t, "insert into someschema.sometable (somename) values ($1)", tenantInsertSQL) +} diff --git a/loader/disk_loader.go b/loader/disk_loader.go index 1055bfe..1b04d54 100644 --- a/loader/disk_loader.go +++ b/loader/disk_loader.go @@ -1,10 +1,10 @@ package loader import ( + "context" "crypto/sha256" "encoding/hex" "io/ioutil" - "os" "path/filepath" "sort" "strings" @@ -17,47 +17,23 @@ import ( // diskLoader is struct used for implementing Loader interface for loading migrations from disk type diskLoader struct { + ctx context.Context config *config.Config } -// GetDiskMigrations returns all migrations from disk -func (dl *diskLoader) GetDiskMigrations() (migrations []types.Migration, err error) { - defer func() { - if r := recover(); r != nil { - var ok bool - err, ok = r.(error) - if !ok { - err = fmt.Errorf("%v", r) - } - } - }() - - migrations = []types.Migration{} +// GetSourceMigrations returns all migrations from disk +func (dl *diskLoader) GetSourceMigrations() []types.Migration { + migrations := []types.Migration{} absBaseDir, err := filepath.Abs(dl.config.BaseDir) if err != nil { - panic(err.Error()) - } - - var dirs []string - err = filepath.Walk(absBaseDir, func(path string, info os.FileInfo, err error) error 
{ - if err != nil { - return err - } - if info.IsDir() { - dirs = append(dirs, path) - } - return nil - }) - - if err != nil { - panic(err.Error()) + panic(fmt.Sprintf("Could not convert baseDir to absolute path: %v", err.Error())) } - singleMigrationsDirs := dl.filterSchemaDirs(dirs, dl.config.SingleMigrations) - tenantMigrationsDirs := dl.filterSchemaDirs(dirs, dl.config.TenantMigrations) - singleScriptsDirs := dl.filterSchemaDirs(dirs, dl.config.SingleScripts) - tenantScriptsDirs := dl.filterSchemaDirs(dirs, dl.config.TenantScripts) + singleMigrationsDirs := dl.getDirs(absBaseDir, dl.config.SingleMigrations) + tenantMigrationsDirs := dl.getDirs(absBaseDir, dl.config.TenantMigrations) + singleScriptsDirs := dl.getDirs(absBaseDir, dl.config.SingleScripts) + tenantScriptsDirs := dl.getDirs(absBaseDir, dl.config.TenantScripts) migrationsMap := make(map[string][]types.Migration) @@ -79,17 +55,13 @@ func (dl *diskLoader) GetDiskMigrations() (migrations []types.Migration, err err } } - return + return migrations } -func (dl *diskLoader) filterSchemaDirs(dirs []string, migrationsDirs []string) []string { +func (dl *diskLoader) getDirs(baseDir string, migrationsDirs []string) []string { var filteredDirs []string - for _, dir := range dirs { - for _, migrationsDir := range migrationsDirs { - if strings.HasSuffix(dir, migrationsDir) { - filteredDirs = append(filteredDirs, dir) - } - } + for _, migrationsDir := range migrationsDirs { + filteredDirs = append(filteredDirs, filepath.Join(baseDir, migrationsDir)) } return filteredDirs } @@ -98,13 +70,14 @@ func (dl *diskLoader) readFromDirs(migrations map[string][]types.Migration, sour for _, sourceDir := range sourceDirs { files, err := ioutil.ReadDir(sourceDir) if err != nil { - panic(err.Error()) + panic(fmt.Sprintf("Could not read source dir %v: %v", sourceDir, err.Error())) } for _, file := range files { if !file.IsDir() { - contents, err := ioutil.ReadFile(filepath.Join(sourceDir, file.Name())) + fullPath := 
filepath.Join(sourceDir, file.Name()) + contents, err := ioutil.ReadFile(fullPath) if err != nil { - panic(err.Error()) + panic(fmt.Sprintf("Could not read file %v: %v", fullPath, err.Error())) } hasher := sha256.New() hasher.Write([]byte(contents)) diff --git a/loader/disk_loader_test.go b/loader/disk_loader_test.go index a6adea0..f5cf350 100644 --- a/loader/disk_loader_test.go +++ b/loader/disk_loader_test.go @@ -1,6 +1,7 @@ package loader import ( + "context" "testing" "github.com/lukaszbudnik/migrator/config" @@ -10,11 +11,49 @@ import ( func TestDiskReadDiskMigrationsNonExistingBaseDirError(t *testing.T) { var config config.Config config.BaseDir = "xyzabc" + config.SingleMigrations = []string{"migrations/config"} - loader := NewLoader(&config) + loader := New(context.TODO(), &config) - _, err := loader.GetDiskMigrations() - assert.Contains(t, err.Error(), "xyzabc: no such file or directory") + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + loader.GetSourceMigrations() + + }() + assert.True(t, didPanic) + assert.Contains(t, message, "xyzabc/migrations/config: no such file or directory") +} + +func TestDiskReadDiskMigrationsNonExistingMigrationsDirError(t *testing.T) { + var config config.Config + config.BaseDir = "../test" + config.SingleMigrations = []string{"migrations/abcdef"} + + loader := New(context.TODO(), &config) + + didPanic := false + var message interface{} + func() { + + defer func() { + if message = recover(); message != nil { + didPanic = true + } + }() + + loader.GetSourceMigrations() + + }() + assert.True(t, didPanic) + assert.Contains(t, message, "test/migrations/abcdef: no such file or directory") } func TestDiskGetDiskMigrations(t *testing.T) { @@ -25,9 +64,8 @@ func TestDiskGetDiskMigrations(t *testing.T) { config.SingleScripts = []string{"migrations/config-scripts"} config.TenantScripts = []string{"migrations/tenants-scripts"} - loader := 
NewLoader(&config) - migrations, err := loader.GetDiskMigrations() - assert.Nil(t, err) + loader := New(context.TODO(), &config) + migrations := loader.GetSourceMigrations() assert.Len(t, migrations, 10) diff --git a/loader/loader.go b/loader/loader.go index 5be18da..441f049 100644 --- a/loader/loader.go +++ b/loader/loader.go @@ -1,16 +1,21 @@ package loader import ( + "context" + "github.com/lukaszbudnik/migrator/config" "github.com/lukaszbudnik/migrator/types" ) // Loader interface abstracts all loading operations performed by migrator type Loader interface { - GetDiskMigrations() ([]types.Migration, error) + GetSourceMigrations() []types.Migration } -// NewLoader returns new instance of Loader, currently DiskLoader is available -func NewLoader(config *config.Config) Loader { - return &diskLoader{config} +// Factory is a factory method for creating Loader instance +type Factory func(context.Context, *config.Config) Loader + +// New returns new instance of Loader, currently DiskLoader is available +func New(ctx context.Context, config *config.Config) Loader { + return &diskLoader{ctx, config} } diff --git a/migrations/migrations.go b/migrations/migrations.go deleted file mode 100644 index f908396..0000000 --- a/migrations/migrations.go +++ /dev/null @@ -1,111 +0,0 @@ -package migrations - -import ( - "context" - - "github.com/lukaszbudnik/migrator/common" - "github.com/lukaszbudnik/migrator/types" -) - -func flattenMigrationDBs(dbMigrations []types.MigrationDB) []types.Migration { - var flattened []types.Migration - var previousMigration types.Migration - for i, m := range dbMigrations { - if i == 0 || m.Migration != previousMigration { - flattened = append(flattened, m.Migration) - previousMigration = m.Migration - } - } - return flattened -} - -// difference returns the elements on disk which are not yet in DB -// the exceptions are MigrationTypeSingleScript and MigrationTypeTenantScript which are always run -func difference(diskMigrations []types.Migration, 
flattenedMigrationDBs []types.Migration) []types.Migration { - // key is Migration.File - existsInDB := map[string]bool{} - for _, m := range flattenedMigrationDBs { - if m.MigrationType != types.MigrationTypeSingleScript && m.MigrationType != types.MigrationTypeTenantScript { - existsInDB[m.File] = true - } - } - diff := []types.Migration{} - for _, m := range diskMigrations { - if _, ok := existsInDB[m.File]; !ok { - diff = append(diff, m) - } - } - return diff -} - -// intersect returns the elements on disk and in DB -func intersect(diskMigrations []types.Migration, flattenedMigrationDBs []types.Migration) []struct { - disk types.Migration - db types.Migration -} { - // key is Migration.File - existsInDB := map[string]types.Migration{} - for _, m := range flattenedMigrationDBs { - existsInDB[m.File] = m - } - intersect := []struct { - disk types.Migration - db types.Migration - }{} - for _, m := range diskMigrations { - if db, ok := existsInDB[m.File]; ok { - intersect = append(intersect, struct { - disk types.Migration - db types.Migration - }{m, db}) - } - } - return intersect -} - -// ComputeMigrationsToApply computes which disk migrations should be applied to DB based on migrations already present in DB -func ComputeMigrationsToApply(ctx context.Context, diskMigrations []types.Migration, dbMigrations []types.MigrationDB) []types.Migration { - flattenedMigrationDBs := flattenMigrationDBs(dbMigrations) - - len := len(flattenedMigrationDBs) - common.LogInfo(ctx, "Number of flattened DB migrations: %d", len) - - out := difference(diskMigrations, flattenedMigrationDBs) - - return out -} - -// FilterTenantMigrations returns only migrations which are of type MigrationTypeTenantSchema -func FilterTenantMigrations(ctx context.Context, diskMigrations []types.Migration) []types.Migration { - filteredTenantMigrations := []types.Migration{} - for _, m := range diskMigrations { - if m.MigrationType == types.MigrationTypeTenantMigration { - filteredTenantMigrations = 
append(filteredTenantMigrations, m) - } - } - - len := len(filteredTenantMigrations) - common.LogInfo(ctx, "Number of filtered tenant DB migrations: %d", len) - - return filteredTenantMigrations -} - -// VerifyCheckSums verifies if CheckSum of disk and flattened DB migrations match -// returns bool indicating if offending (i.e., modified) disk migrations were found -// if bool is false the function returns a slice of offending migrations -// if bool is true the slice of effending migrations is empty -func VerifyCheckSums(diskMigrations []types.Migration, dbMigrations []types.MigrationDB) (bool, []types.Migration) { - - flattenedMigrationDBs := flattenMigrationDBs(dbMigrations) - - intersect := intersect(diskMigrations, flattenedMigrationDBs) - var offendingMigrations []types.Migration - var result = true - for _, t := range intersect { - if t.disk.CheckSum != t.db.CheckSum { - offendingMigrations = append(offendingMigrations, t.disk) - result = false - } - } - return result, offendingMigrations -} diff --git a/migrator.go b/migrator.go index 70e22e1..e9892c9 100644 --- a/migrator.go +++ b/migrator.go @@ -2,11 +2,17 @@ package main import ( "bytes" + "context" "flag" - "log" "os" + "github.com/gin-gonic/gin" + "github.com/lukaszbudnik/migrator/common" "github.com/lukaszbudnik/migrator/config" + "github.com/lukaszbudnik/migrator/coordinator" + "github.com/lukaszbudnik/migrator/db" + "github.com/lukaszbudnik/migrator/loader" + "github.com/lukaszbudnik/migrator/notifications" "github.com/lukaszbudnik/migrator/server" ) @@ -26,9 +32,7 @@ var GitCommitDate string func main() { - log.SetFlags(log.Ldate | log.Ltime | log.Lmicroseconds | log.LUTC) - - log.Printf("INFO migrator version %v, build %v, date %v", GitBranch, GitCommitSha, GitCommitDate) + common.Log("INFO", "migrator version %v, build %v, date %v", GitBranch, GitCommitSha, GitCommitDate) flag := flag.NewFlagSet(os.Args[0], flag.ContinueOnError) buf := new(bytes.Buffer) @@ -36,22 +40,27 @@ func main() { var 
configFile string flag.StringVar(&configFile, "configFile", DefaultConfigFile, "path to migrator configuration yaml file") - err := flag.Parse(os.Args[1:]) - if err != nil { - log.Fatal(buf) + if err := flag.Parse(os.Args[1:]); err != nil { + common.Log("ERROR", buf.String()) os.Exit(1) } - config, err := config.FromFile(configFile) + cfg, err := config.FromFile(configFile) if err != nil { - log.Fatalf("ERROR Error reading config file: %v", err) + common.Log("ERROR", "Error reading config file: %v", err) + os.Exit(1) } - srv, err := server.Start(config) - if err != nil { - log.Fatalf("ERROR Error starting: %v", err) + var createCoordinator = func(ctx context.Context, config *config.Config) coordinator.Coordinator { + coordinator := coordinator.New(ctx, config, db.New, loader.New, notifications.New) + return coordinator + } + + gin.SetMode(gin.ReleaseMode) + g := server.SetupRouter(cfg, createCoordinator) + if err := g.Run(":" + server.GetPort(cfg)); err != nil { + common.Log("ERROR", "Error starting migrator: %v", err) } - defer srv.Close() } diff --git a/notifications/notifications.go b/notifications/notifications.go index f0f1014..7302224 100644 --- a/notifications/notifications.go +++ b/notifications/notifications.go @@ -2,6 +2,7 @@ package notifications import ( "bytes" + "context" "fmt" "io/ioutil" "net/http" @@ -25,9 +26,7 @@ type baseNotifier struct { config *config.Config } -func (bn *baseNotifier) Notify(text string) (string, error) { - - message := strings.Replace(bn.config.WebHookTemplate, textPlaceHolder, text, -1) +func (bn *baseNotifier) Notify(message string) (string, error) { reader := bytes.NewReader([]byte(message)) url := bn.config.WebHookURL @@ -70,10 +69,13 @@ func (sn *noopNotifier) Notify(text string) (string, error) { return "noop", nil } -// NewNotifier creates Notifier object based on config passed -func NewNotifier(config *config.Config) Notifier { - // webhook URL and template are required - if len(config.WebHookURL) > 0 && 
len(config.WebHookTemplate) > 0 { +// Factory is a factory method for creating Loader instance +type Factory func(context.Context, *config.Config) Notifier + +// New creates Notifier object based on config passed +func New(ctx context.Context, config *config.Config) Notifier { + // webhook URL is required + if len(config.WebHookURL) > 0 { return &baseNotifier{config} } // otherwise return noop diff --git a/notifications/notifications_test.go b/notifications/notifications_test.go index bd67b8d..27c549b 100644 --- a/notifications/notifications_test.go +++ b/notifications/notifications_test.go @@ -1,6 +1,7 @@ package notifications import ( + "context" "io/ioutil" "net/http" "net/http/httptest" @@ -12,7 +13,7 @@ import ( func TestNoopNotifier(t *testing.T) { config := config.Config{} - notifier := NewNotifier(&config) + notifier := New(context.TODO(), &config) result, err := notifier.Notify("abc") assert.Equal(t, "noop", result) @@ -34,12 +35,12 @@ func TestWebHookNotifier(t *testing.T) { }()) config := config.Config{} - config.WebHookTemplate = `{"text": "{text}","icon_emoji": ":white_check_mark:"}` config.WebHookURL = server.URL - notifier := NewNotifier(&config) + notifier := New(context.TODO(), &config) - result, err := notifier.Notify("abc") + message := `{"text": "abc","icon_emoji": ":white_check_mark:"}` + result, err := notifier.Notify(message) assert.Nil(t, err) assert.Equal(t, `{"result": "ok"}`, result) @@ -64,11 +65,10 @@ func TestWebHookNotifierCustomHeaders(t *testing.T) { }()) config := config.Config{} - config.WebHookTemplate = `{"text": "{text}","icon_emoji": ":white_check_mark:"}` config.WebHookURL = server.URL config.WebHookHeaders = []string{"Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l", "Content-Type: application/x-yaml", "X-CustomHeader: value1,value2"} - notifier := NewNotifier(&config) + notifier := New(context.TODO(), &config) result, err := notifier.Notify("abc") @@ -82,8 +82,7 @@ func TestWebHookNotifierCustomHeaders(t *testing.T) { func 
TestWebHookURLError(t *testing.T) { config := config.Config{} config.WebHookURL = "xczxcvv" - config.WebHookTemplate = "not imporant for this test" - notifier := NewNotifier(&config) + notifier := New(context.TODO(), &config) result, err := notifier.Notify("abc") assert.NotNil(t, err) diff --git a/server/server.go b/server/server.go index 1a2e9c0..17435fb 100644 --- a/server/server.go +++ b/server/server.go @@ -2,350 +2,228 @@ package server import ( "context" - "encoding/json" "fmt" - "io/ioutil" - "log" + "net" "net/http" + "os" + "runtime/debug" "strings" "time" + "github.com/gin-gonic/gin" + "github.com/gin-gonic/gin/binding" + "github.com/go-playground/validator" + "github.com/lukaszbudnik/migrator/common" "github.com/lukaszbudnik/migrator/config" - "github.com/lukaszbudnik/migrator/db" - "github.com/lukaszbudnik/migrator/loader" - "github.com/lukaszbudnik/migrator/migrations" - "github.com/lukaszbudnik/migrator/notifications" + "github.com/lukaszbudnik/migrator/coordinator" "github.com/lukaszbudnik/migrator/types" ) const ( defaultPort string = "8080" - requestIDHeader string = "X-Request-Id" + requestIDHeader string = "X-Request-ID" ) -func getPort(config *config.Config) string { - if len(strings.TrimSpace(config.Port)) == 0 { - return defaultPort - } - return config.Port -} - -func sendNotification(ctx context.Context, config *config.Config, text string) { - notifier := notifications.NewNotifier(config) - resp, err := notifier.Notify(text) - - if err != nil { - common.LogError(ctx, "Notifier err: %v", err) - } else { - common.LogInfo(ctx, "Notifier response: %v", resp) - } -} - -func createAndInitLoader(config *config.Config, newLoader func(*config.Config) loader.Loader) loader.Loader { - loader := newLoader(config) - return loader +type migrationsPostRequest struct { + Response types.MigrationsResponseType `json:"response" binding:"required,response"` + Mode types.MigrationsModeType `json:"mode" binding:"required,mode"` } -func 
createAndInitConnector(config *config.Config, newConnector func(*config.Config) (db.Connector, error)) (db.Connector, error) { - connector, err := newConnector(config) - if err != nil { - return nil, err - } - if err := connector.Init(); err != nil { - return nil, err - } - return connector, nil +type tenantsPostRequest struct { + Name string `json:"name" binding:"required"` + migrationsPostRequest } -func errorResponse(w http.ResponseWriter, errorStatus int, response interface{}) { - w.WriteHeader(errorStatus) - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(response) +type migrationsSuccessResponse struct { + Results *types.MigrationResults `json:"results"` + AppliedMigrations []types.Migration `json:"appliedMigrations,omitempty"` } -func errorResponseStatusErrorMessage(w http.ResponseWriter, errorStatus int, errorMessage string) { - errorResponse(w, errorStatus, struct{ ErrorMessage string }{errorMessage}) +type errorResponse struct { + ErrorMessage string `json:"error"` + Details interface{} `json:"details,omitempty"` } -func errorDefaultResponse(w http.ResponseWriter, errorStatus int) { - errorResponseStatusErrorMessage(w, errorStatus, http.StatusText(errorStatus)) -} - -func errorInternalServerErrorResponse(w http.ResponseWriter, err error) { - errorResponseStatusErrorMessage(w, http.StatusInternalServerError, err.Error()) -} - -func jsonResponse(w http.ResponseWriter, response interface{}) { - w.Header().Set("Content-Type", "application/json") - json.NewEncoder(w).Encode(response) +// GetPort gets the port from config or defaultPort +func GetPort(config *config.Config) string { + if len(strings.TrimSpace(config.Port)) == 0 { + return defaultPort + } + return config.Port } -func tracing() func(http.Handler) http.Handler { - return func(next http.Handler) http.Handler { - return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) { - // requestID - requestID := r.Header.Get(requestIDHeader) - if requestID == "" { - 
requestID = fmt.Sprintf("%d", time.Now().UnixNano()) +func requestIDHandler() gin.HandlerFunc { + return func(c *gin.Context) { + requestID := c.Request.Header.Get(requestIDHeader) + if requestID == "" { + requestID = fmt.Sprintf("%d", time.Now().UnixNano()) + } + ctx := context.WithValue(c.Request.Context(), common.RequestIDKey{}, requestID) + c.Request = c.Request.WithContext(ctx) + c.Next() + } +} + +func recovery() gin.HandlerFunc { + return func(c *gin.Context) { + defer func() { + if err := recover(); err != nil { + // Check for a broken connection, as it is not really a + // condition that warrants a panic stack trace. + var brokenPipe bool + if ne, ok := err.(*net.OpError); ok { + if se, ok := ne.Err.(*os.SyscallError); ok { + if strings.Contains(strings.ToLower(se.Error()), "broken pipe") || strings.Contains(strings.ToLower(se.Error()), "connection reset by peer") { + brokenPipe = true + } + } + } + + // If the connection is dead, we can't write a status to it. + if brokenPipe { + common.LogPanic(c.Request.Context(), "Broken pipe: %v", err) + c.Error(err.(error)) // nolint: errcheck + c.Abort() + } else { + common.LogPanic(c.Request.Context(), "Panic recovered: %v", err) + if gin.IsDebugging() { + debug.PrintStack() + } + c.AbortWithStatusJSON(http.StatusInternalServerError, &errorResponse{err.(string), nil}) + } } - ctx := context.WithValue(r.Context(), common.RequestIDKey{}, requestID) - // action - action := fmt.Sprintf("%v %v", r.Method, r.RequestURI) - ctx = context.WithValue(ctx, common.ActionKey{}, action) - next.ServeHTTP(w, r.WithContext(ctx)) - }) + }() + c.Next() } } -func makeHandler(handler func(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader), config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) http.HandlerFunc { - return func(w http.ResponseWriter, r 
*http.Request) { - handler(w, r, config, newConnector, newLoader) +func requestLoggerHandler() gin.HandlerFunc { + return func(c *gin.Context) { + common.LogInfo(c.Request.Context(), "clientIP=%v method=%v request=%v", c.ClientIP(), c.Request.Method, c.Request.URL.RequestURI()) + c.Next() } } -func configHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - if r.Method != http.MethodGet { - common.LogError(r.Context(), "Wrong method") - errorDefaultResponse(w, http.StatusMethodNotAllowed) - return +func makeHandler(config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator, handler func(*gin.Context, *config.Config, func(context.Context, *config.Config) coordinator.Coordinator)) gin.HandlerFunc { + return func(c *gin.Context) { + handler(c, config, newCoordinator) } - common.LogInfo(r.Context(), "returning config file") - w.Header().Set("Content-Type", "application/x-yaml") - fmt.Fprintf(w, "%v", config) } -func diskMigrationsHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - if r.Method != http.MethodGet { - common.LogError(r.Context(), "Wrong method") - errorDefaultResponse(w, http.StatusMethodNotAllowed) - return - } - common.LogInfo(r.Context(), "Start") - loader := createAndInitLoader(config, newLoader) - diskMigrations, err := loader.GetDiskMigrations() - if err != nil { - common.LogError(r.Context(), "Error getting disk migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - common.LogInfo(r.Context(), "Returning disk migrations: %v", len(diskMigrations)) - jsonResponse(w, diskMigrations) +func configHandler(c *gin.Context, config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator) { + c.YAML(200, 
config) } -func migrationsHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - if r.Method != http.MethodGet && r.Method != http.MethodPost { - common.LogError(r.Context(), "Wrong method: %v", r.Method) - errorDefaultResponse(w, http.StatusMethodNotAllowed) - return - } - common.LogInfo(r.Context(), "Start") - if r.Method == http.MethodGet { - migrationsGetHandler(w, r, config, newConnector, newLoader) - } - if r.Method == http.MethodPost { - migrationsPostHandler(w, r, config, newConnector, newLoader) - } +func migrationsSourceHandler(c *gin.Context, config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator) { + coordinator := newCoordinator(c.Request.Context(), config) + defer coordinator.Dispose() + migrations := coordinator.GetSourceMigrations() + common.LogInfo(c.Request.Context(), "Returning source migrations: %v", len(migrations)) + c.JSON(http.StatusOK, migrations) } -func migrationsGetHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - connector, err := createAndInitConnector(config, newConnector) - if err != nil { - common.LogError(r.Context(), "Error creating connector: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - defer connector.Dispose() - dbMigrations, err := connector.GetDBMigrations() - if err != nil { - common.LogError(r.Context(), "Error getting DB migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - common.LogInfo(r.Context(), "Returning DB migrations: %v", len(dbMigrations)) - jsonResponse(w, dbMigrations) +func migrationsAppliedHandler(c *gin.Context, config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator) { + coordinator := 
newCoordinator(c.Request.Context(), config) + defer coordinator.Dispose() + dbMigrations := coordinator.GetAppliedMigrations() + common.LogInfo(c.Request.Context(), "Returning applied migrations: %v", len(dbMigrations)) + c.JSON(http.StatusOK, dbMigrations) } -func migrationsPostHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - loader := createAndInitLoader(config, newLoader) - connector, err := createAndInitConnector(config, newConnector) - if err != nil { - common.LogError(r.Context(), "Error creating connector: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - defer connector.Dispose() +func migrationsPostHandler(c *gin.Context, config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator) { + var request migrationsPostRequest - diskMigrations, err := loader.GetDiskMigrations() - if err != nil { - common.LogError(r.Context(), "Error getting disk migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - - dbMigrations, err := connector.GetDBMigrations() - if err != nil { - common.LogError(r.Context(), "Error getting DB migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) + if err := c.ShouldBindJSON(&request); err != nil { + common.LogError(c.Request.Context(), "Error reading request: %v", err.Error()) + c.AbortWithStatusJSON(http.StatusBadRequest, errorResponse{"Invalid request, please see documentation for valid JSON payload", nil}) return } - verified, offendingMigrations := migrations.VerifyCheckSums(diskMigrations, dbMigrations) + coordinator := newCoordinator(c.Request.Context(), config) + defer coordinator.Dispose() - if !verified { - common.LogError(r.Context(), "Checksum verification failed for migrations: %v", len(offendingMigrations)) - errorResponse(w, http.StatusFailedDependency, struct { - ErrorMessage string - 
OffendingMigrations []types.Migration - }{"Checksum verification failed. Please review offending migrations.", offendingMigrations}) + if ok, offendingMigrations := coordinator.VerifySourceMigrationsCheckSums(); !ok { + common.LogError(c.Request.Context(), "Checksum verification failed for migrations: %v", len(offendingMigrations)) + c.AbortWithStatusJSON(http.StatusFailedDependency, errorResponse{"Checksum verification failed. Please review offending migrations.", offendingMigrations}) return } - migrationsToApply := migrations.ComputeMigrationsToApply(r.Context(), diskMigrations, dbMigrations) - common.LogInfo(r.Context(), "Found migrations to apply: %d", len(migrationsToApply)) + results, appliedMigrations := coordinator.ApplyMigrations(request.Mode) - err = connector.ApplyMigrations(r.Context(), migrationsToApply) - if err != nil { - common.LogError(r.Context(), "Error applying migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - - text := fmt.Sprintf("Applied migrations: %v", len(migrationsToApply)) - sendNotification(r.Context(), config, text) - - common.LogInfo(r.Context(), "Returning applied migrations: %v", len(migrationsToApply)) - jsonResponse(w, migrationsToApply) -} + common.LogInfo(c.Request.Context(), "Returning applied migrations: %v", len(appliedMigrations)) -func tenantsHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - if r.Method != http.MethodGet && r.Method != http.MethodPost { - common.LogError(r.Context(), "Wrong method") - errorDefaultResponse(w, http.StatusMethodNotAllowed) - return + var response *migrationsSuccessResponse + if request.Response == types.ResponseTypeFull { + response = &migrationsSuccessResponse{results, appliedMigrations} + } else { + response = &migrationsSuccessResponse{results, nil} } - common.LogInfo(r.Context(), "Start") - if r.Method == http.MethodGet { - 
tenantsGetHandler(w, r, config, newConnector, newLoader) - } - if r.Method == http.MethodPost { - tenantsPostHandler(w, r, config, newConnector, newLoader) - } + c.JSON(http.StatusOK, response) } -func tenantsGetHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - connector, err := createAndInitConnector(config, newConnector) - if err != nil { - common.LogError(r.Context(), "Error creating connector: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - defer connector.Dispose() - tenants, err := connector.GetTenants() - if err != nil { - common.LogError(r.Context(), "Error getting tenants: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - common.LogInfo(r.Context(), "Returning tenants: %v", len(tenants)) - jsonResponse(w, tenants) +func tenantsGetHandler(c *gin.Context, config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator) { + coordinator := newCoordinator(c.Request.Context(), config) + defer coordinator.Dispose() + tenants := coordinator.GetTenants() + common.LogInfo(c.Request.Context(), "Returning tenants: %v", len(tenants)) + c.JSON(http.StatusOK, tenants) } -func tenantsPostHandler(w http.ResponseWriter, r *http.Request, config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader func(*config.Config) loader.Loader) { - loader := createAndInitLoader(config, newLoader) - connector, err := createAndInitConnector(config, newConnector) +func tenantsPostHandler(c *gin.Context, config *config.Config, newCoordinator func(context.Context, *config.Config) coordinator.Coordinator) { + var request tenantsPostRequest + err := c.ShouldBindJSON(&request) if err != nil { - common.LogError(r.Context(), "Error creating connector: %v", err.Error()) - errorInternalServerErrorResponse(w, err) + common.LogError(c.Request.Context(), "Bad 
request: %v", err.Error()) + c.AbortWithStatusJSON(http.StatusBadRequest, errorResponse{"Invalid request, please see documentation for valid JSON payload", nil}) return } - defer connector.Dispose() - body, err := ioutil.ReadAll(r.Body) - if err != nil { - common.LogError(r.Context(), "Error reading request: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } - var tenant struct { - Name string `json:"name"` - } - err = json.Unmarshal(body, &tenant) - if err != nil || tenant.Name == "" { - common.LogError(r.Context(), "Bad request: %v", err.Error()) - errorResponseStatusErrorMessage(w, http.StatusBadRequest, err.Error()) - return - } + coordinator := newCoordinator(c.Request.Context(), config) + defer coordinator.Dispose() - diskMigrations, err := loader.GetDiskMigrations() - if err != nil { - common.LogError(r.Context(), "Error getting disk migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) + if ok, offendingMigrations := coordinator.VerifySourceMigrationsCheckSums(); !ok { + common.LogError(c.Request.Context(), "Checksum verification failed for migrations: %v", len(offendingMigrations)) + c.AbortWithStatusJSON(http.StatusFailedDependency, errorResponse{"Checksum verification failed. 
Please review offending migrations.", offendingMigrations}) return } - dbMigrations, err := connector.GetDBMigrations() - if err != nil { - common.LogError(r.Context(), "Error getting DB migrations: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return - } + results, appliedMigrations := coordinator.AddTenantAndApplyMigrations(request.Mode, request.Name) - verified, offendingMigrations := migrations.VerifyCheckSums(diskMigrations, dbMigrations) + common.LogInfo(c.Request.Context(), "Tenant %v added, migrations applied: %v", request.Name, len(appliedMigrations)) - if !verified { - common.LogError(r.Context(), "Checksum verification failed for migrations: %v", len(offendingMigrations)) - errorResponse(w, http.StatusFailedDependency, struct { - ErrorMessage string - OffendingMigrations []types.Migration - }{"Checksum verification failed. Please review offending migrations.", offendingMigrations}) - return - } - - // filter only tenant schemas - migrationsToApply := migrations.FilterTenantMigrations(r.Context(), diskMigrations) - common.LogInfo(r.Context(), "Found migrations to apply: %d", len(migrationsToApply)) - - err = connector.AddTenantAndApplyMigrations(r.Context(), tenant.Name, migrationsToApply) - if err != nil { - common.LogError(r.Context(), "Error adding new tenant: %v", err.Error()) - errorInternalServerErrorResponse(w, err) - return + var response *migrationsSuccessResponse + if request.Response == types.ResponseTypeFull { + response = &migrationsSuccessResponse{results, appliedMigrations} + } else { + response = &migrationsSuccessResponse{results, nil} } - text := fmt.Sprintf("Tenant %q added, migrations applied: %v", tenant.Name, len(migrationsToApply)) - sendNotification(r.Context(), config, text) - - common.LogInfo(r.Context(), text) - jsonResponse(w, migrationsToApply) + c.JSON(http.StatusOK, response) } -func registerHandlers(config *config.Config, newConnector func(*config.Config) (db.Connector, error), newLoader 
func(*config.Config) loader.Loader) *http.ServeMux { - router := http.NewServeMux() - router.Handle("/", http.NotFoundHandler()) - router.Handle("/config", makeHandler(configHandler, config, nil, nil)) - router.Handle("/diskMigrations", makeHandler(diskMigrationsHandler, config, nil, newLoader)) - router.Handle("/migrations", makeHandler(migrationsHandler, config, newConnector, newLoader)) - router.Handle("/tenants", makeHandler(tenantsHandler, config, newConnector, newLoader)) +// SetupRouter setups router +func SetupRouter(config *config.Config, newCoordinator func(ctx context.Context, config *config.Config) coordinator.Coordinator) *gin.Engine { + r := gin.New() + r.HandleMethodNotAllowed = true + r.Use(recovery(), requestIDHandler(), requestLoggerHandler()) - return router -} + if v, ok := binding.Validator.Engine().(*validator.Validate); ok { + v.RegisterValidation("response", types.ValidateMigrationsResponseType) + v.RegisterValidation("mode", types.ValidateMigrationsModeType) + } -// Start starts simple Migrator API endpoint using config passed as first argument -// and using connector created by a function passed as second argument and disk loader created by a function passed as third argument -func Start(config *config.Config) (*http.Server, error) { - port := getPort(config) - log.Printf("INFO migrator starting on http://0.0.0.0:%s", port) + v1 := r.Group("/v1") - router := registerHandlers(config, db.NewConnector, loader.NewLoader) + v1.GET("/config", makeHandler(config, newCoordinator, configHandler)) - server := &http.Server{ - Addr: ":" + port, - Handler: tracing()(router), - } + v1.GET("/tenants", makeHandler(config, newCoordinator, tenantsGetHandler)) + v1.POST("/tenants", makeHandler(config, newCoordinator, tenantsPostHandler)) - err := server.ListenAndServe() + v1.GET("/migrations/source", makeHandler(config, newCoordinator, migrationsSourceHandler)) + v1.GET("/migrations/applied", makeHandler(config, newCoordinator, migrationsAppliedHandler)) + 
v1.POST("/migrations", makeHandler(config, newCoordinator, migrationsPostHandler)) - return server, err + return r } diff --git a/server/server_mocks.go b/server/server_mocks.go index bfdb707..16c2bf8 100644 --- a/server/server_mocks.go +++ b/server/server_mocks.go @@ -2,117 +2,66 @@ package server import ( "context" - "errors" "fmt" - "math" "time" "github.com/lukaszbudnik/migrator/config" - "github.com/lukaszbudnik/migrator/db" - "github.com/lukaszbudnik/migrator/loader" + "github.com/lukaszbudnik/migrator/coordinator" "github.com/lukaszbudnik/migrator/types" ) -// will start returning errors when errorThreshold reached -type mockedErrorDiskLoader struct { +type mockedCoordinator struct { errorThreshold int counter int } -func (m *mockedErrorDiskLoader) GetDiskMigrations() ([]types.Migration, error) { - if m.errorThreshold == m.counter { - return nil, errors.New("disk trouble maker") - } - m.counter++ - m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} - m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: types.MigrationTypeTenantMigration, Contents: "select def"} - return []types.Migration{m1, m2}, nil +func newMockedCoordinator(ctx context.Context, config *config.Config) coordinator.Coordinator { + return newMockedErrorCoordinator(-1)(ctx, config) } -func newMockedErrorDiskLoader(errorThreshold int) func(config *config.Config) loader.Loader { - return func(config *config.Config) loader.Loader { - return &mockedErrorDiskLoader{errorThreshold: errorThreshold} +func newMockedErrorCoordinator(errorThreshold int) func(context.Context, *config.Config) coordinator.Coordinator { + return func(ctx context.Context, config *config.Config) coordinator.Coordinator { + return &mockedCoordinator{errorThreshold: errorThreshold} } } -func newMockedDiskLoader(config *config.Config) 
loader.Loader { - return newMockedErrorDiskLoader(math.MaxInt64)(config) +func (m *mockedCoordinator) Dispose() { } -type mockedBrokenCheckSumDiskLoader struct { -} - -func (m *mockedBrokenCheckSumDiskLoader) GetDiskMigrations() ([]types.Migration, error) { - m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc", CheckSum: "xxx"} - return []types.Migration{m1}, nil -} - -func newBrokenCheckSumMockedDiskLoader(config *config.Config) loader.Loader { - return new(mockedBrokenCheckSumDiskLoader) -} - -// will start returning errors when errorThreshold reached -type mockedErrorConnector struct { - errorThreshold int - counter int -} - -func (m *mockedErrorConnector) Init() error { +func (m *mockedCoordinator) GetSourceMigrations() []types.Migration { if m.errorThreshold == m.counter { - return fmt.Errorf("Mocked Error Connector: threshold %v reached", m.errorThreshold) + panic(fmt.Sprintf("Mocked Error Disk Loader: threshold %v reached", m.errorThreshold)) } m.counter++ - return nil -} - -func (m *mockedErrorConnector) Dispose() { -} - -func (m *mockedErrorConnector) AddTenantAndApplyMigrations(context.Context, string, []types.Migration) error { - if m.errorThreshold == m.counter { - return fmt.Errorf("Mocked Error Connector: threshold %v reached", m.errorThreshold) - } - m.counter++ - return nil -} - -func (m *mockedErrorConnector) GetTenants() ([]string, error) { - if m.errorThreshold == m.counter { - return nil, fmt.Errorf("Mocked Error Connector: threshold %v reached", m.errorThreshold) - } - m.counter++ - return []string{"a", "b", "c"}, nil + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc"} + m2 := types.Migration{Name: "201602220001.sql", SourceDir: "source", File: "source/201602220001.sql", MigrationType: 
types.MigrationTypeTenantMigration, Contents: "select def"} + return []types.Migration{m1, m2} } -func (m *mockedErrorConnector) GetDBMigrations() ([]types.MigrationDB, error) { - if m.errorThreshold == m.counter { - return nil, fmt.Errorf("Mocked Error Connector: threshold %v reached", m.errorThreshold) - } - m.counter++ +func (m *mockedCoordinator) GetAppliedMigrations() []types.MigrationDB { m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration} d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) - ms := []types.MigrationDB{{Migration: m1, Schema: "source", Created: d1}} - return ms, nil + ms := []types.MigrationDB{{Migration: m1, Schema: "source", AppliedAt: d1}} + return ms } -func (m *mockedErrorConnector) ApplyMigrations(ctx context.Context, migrations []types.Migration) error { +func (m *mockedCoordinator) GetTenants() []string { + return []string{"a", "b", "c"} +} + +func (m *mockedCoordinator) VerifySourceMigrationsCheckSums() (bool, []types.Migration) { if m.errorThreshold == m.counter { - return fmt.Errorf("Mocked Error Connector: threshold %v reached", m.errorThreshold) + m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, Contents: "select abc", CheckSum: "123"} + return false, []types.Migration{m1} } m.counter++ - return nil -} - -func newMockedConnector(config *config.Config) (db.Connector, error) { - return newMockedErrorConnector(math.MaxInt64)(config) + return true, nil } -func newMockedErrorConnector(errorThreshold int) func(*config.Config) (db.Connector, error) { - return func(config *config.Config) (db.Connector, error) { - return &mockedErrorConnector{errorThreshold: errorThreshold}, nil - } +func (m *mockedCoordinator) ApplyMigrations(types.MigrationsModeType) (*types.MigrationResults, []types.Migration) { + return &types.MigrationResults{}, 
m.GetSourceMigrations() } -func newConnectorReturnError(config *config.Config) (db.Connector, error) { - return nil, errors.New("trouble maker") +func (m *mockedCoordinator) AddTenantAndApplyMigrations(types.MigrationsModeType, string) (*types.MigrationResults, []types.Migration) { + return &types.MigrationResults{}, m.GetSourceMigrations()[1:] } diff --git a/server/server_test.go b/server/server_test.go index 60bc222..00aebf3 100644 --- a/server/server_test.go +++ b/server/server_test.go @@ -2,19 +2,13 @@ package server import ( "bytes" - "context" - "errors" - "fmt" "io" "net/http" "net/http/httptest" "strings" "testing" - "github.com/lukaszbudnik/migrator/common" "github.com/lukaszbudnik/migrator/config" - "github.com/lukaszbudnik/migrator/db" - "github.com/lukaszbudnik/migrator/loader" "github.com/stretchr/testify/assert" ) @@ -24,317 +18,243 @@ var ( ) func newTestRequest(method, url string, body io.Reader) (*http.Request, error) { - req, err := http.NewRequest(method, url, body) - if err != nil { - return req, err - } - ctx := req.Context() - ctx = context.WithValue(ctx, common.RequestIDKey{}, "123") - action := fmt.Sprintf("%v %v", method, strings.Replace(url, "http://example.com", "", -1)) - ctx = context.WithValue(ctx, common.ActionKey{}, action) - return req.WithContext(ctx), err + versionURL := "/v1" + url + return http.NewRequest(method, versionURL, body) } func TestGetDefaultPort(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - assert.Equal(t, "8080", getPort(config)) + assert.Equal(t, "8080", GetPort(config)) } func TestGetDefaultPortOverrides(t *testing.T) { config, err := config.FromFile(configFileOverrides) assert.Nil(t, err) - assert.Equal(t, "8811", getPort(config)) + assert.Equal(t, "8811", GetPort(config)) } -func TestRegisterHandlers(t *testing.T) { - config, err := config.FromFile(configFile) - assert.Nil(t, err) - router := registerHandlers(config, nil, nil) - assert.NotNil(t, router) -} +// section /config -// 
section: /config -func TestServerConfig(t *testing.T) { +func TestConfigRoute(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - req, _ := newTestRequest(http.MethodGet, "http://example.com/config", nil) + router := SetupRouter(config, nil) w := httptest.NewRecorder() - handler := makeHandler(configHandler, config, nil, nil) - handler(w, req) + req, _ := newTestRequest("GET", "/config", nil) + router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/x-yaml", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, "application/x-yaml; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, config.String(), strings.TrimSpace(w.Body.String())) } -func TestServerConfigMethodNotAllowed(t *testing.T) { - config, err := config.FromFile(configFile) - assert.Nil(t, err) - - httpMethods := []string{http.MethodHead, http.MethodPost, http.MethodPut, http.MethodPatch, http.MethodDelete, http.MethodConnect, http.MethodOptions, http.MethodTrace} - - for _, httpMethod := range httpMethods { +// section /migrations/source - req, _ := newTestRequest(httpMethod, "http://example.com/config", nil) - - w := httptest.NewRecorder() - handler := makeHandler(configHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) - - assert.Equal(t, http.StatusMethodNotAllowed, w.Code) - } -} - -// section: /tenants -func TestServerTenantsGet(t *testing.T) { +func TestDiskMigrationsRoute(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - req, _ := newTestRequest(http.MethodGet, "http://example.com/tenants", nil) + router := SetupRouter(config, newMockedCoordinator) w := httptest.NewRecorder() - handler := makeHandler(tenantsHandler, config, newMockedConnector, nil) - handler(w, req) + req, _ := newTestRequest("GET", "/migrations/source", nil) + router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - 
assert.Equal(t, `["a","b","c"]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `[{"name":"201602220000.sql","sourceDir":"source","file":"source/201602220000.sql","migrationType":1,"contents":"select abc","checkSum":""},{"name":"201602220001.sql","sourceDir":"source","file":"source/201602220001.sql","migrationType":2,"contents":"select def","checkSum":""}]`, strings.TrimSpace(w.Body.String())) } -func TestServerTenantsPost(t *testing.T) { +// section /migrations/applied + +func TestAppliedMigrationsRoute(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - json := []byte(`{"name": "new_tenant"}`) - req, _ := newTestRequest(http.MethodPost, "http://example.com/tenants", bytes.NewBuffer(json)) - req.Header.Set("Content-Type", "application/json") + router := SetupRouter(config, newMockedCoordinator) + + req, _ := newTestRequest(http.MethodGet, "/migrations/applied", nil) w := httptest.NewRecorder() - handler := makeHandler(tenantsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) + router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":2,"Contents":"select def","CheckSum":""}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `[{"name":"201602220000.sql","sourceDir":"source","file":"source/201602220000.sql","migrationType":1,"contents":"","checkSum":"","schema":"source","appliedAt":"2016-02-22T16:41:01.000000123Z"}]`, strings.TrimSpace(w.Body.String())) } -type errReader int - -func (errReader) Read(p []byte) (n int, err error) { - return 0, errors.New("trouble maker") -} +// section /migrations -func TestServerTenantsPostIOError(t *testing.T) { 
+func TestMigrationsPostRoute(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - req, _ := newTestRequest(http.MethodPost, "http://example.com/tenants", errReader(0)) - req.Header.Set("Content-Type", "application/json") + router := SetupRouter(config, newMockedCoordinator) + + json := []byte(`{"mode": "apply", "response": "full"}`) + req, _ := newTestRequest(http.MethodPost, "/migrations", bytes.NewBuffer(json)) w := httptest.NewRecorder() - handler := makeHandler(tenantsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) + router.ServeHTTP(w, req) - assert.Equal(t, http.StatusInternalServerError, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `{"ErrorMessage":"trouble maker"}`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Contains(t, strings.TrimSpace(w.Body.String()), `[{"name":"201602220000.sql","sourceDir":"source","file":"source/201602220000.sql","migrationType":1,"contents":"select abc","checkSum":""},{"name":"201602220001.sql","sourceDir":"source","file":"source/201602220001.sql","migrationType":2,"contents":"select def","checkSum":""}]`) } -func TestServerTenantsPostBadRequest(t *testing.T) { +func TestMigrationsPostRouteSummaryResponse(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - // empty JSON payload - json := []byte("") - req, _ := newTestRequest(http.MethodPost, "http://example.com/tenants", bytes.NewBuffer(json)) + router := SetupRouter(config, newMockedCoordinator) + + json := []byte(`{"mode": "apply", "response": "summary"}`) + req, _ := newTestRequest(http.MethodPost, "/migrations", bytes.NewBuffer(json)) w := httptest.NewRecorder() - handler := makeHandler(tenantsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) + router.ServeHTTP(w, req) - assert.Equal(t, 
http.StatusBadRequest, w.Code) + assert.Equal(t, http.StatusOK, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Contains(t, strings.TrimSpace(w.Body.String()), `"results":`) + assert.NotContains(t, strings.TrimSpace(w.Body.String()), `[{"name":"201602220000.sql","sourceDir":"source","file":"source/201602220000.sql","migrationType":1,"contents":"select abc","checkSum":""},{"name":"201602220001.sql","sourceDir":"source","file":"source/201602220001.sql","migrationType":2,"contents":"select def","checkSum":""}]`) } -func TestServerTenantsPostFailedDependency(t *testing.T) { +func TestMigrationsPostRouteBadRequest(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - json := []byte(`{"name": "new_tenant"}`) - req, _ := newTestRequest(http.MethodPost, "http://example.com/tenants", bytes.NewBuffer(json)) + router := SetupRouter(config, newMockedCoordinator) + + // response is invalid + json := []byte(`{"mode": "apply", "response": "abc"}`) + req, _ := newTestRequest(http.MethodPost, "/migrations", bytes.NewBuffer(json)) w := httptest.NewRecorder() - handler := makeHandler(tenantsHandler, config, newMockedConnector, newBrokenCheckSumMockedDiskLoader) - handler(w, req) + router.ServeHTTP(w, req) - assert.Equal(t, http.StatusFailedDependency, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `{"ErrorMessage":"Checksum verification failed. 
Please review offending migrations.","OffendingMigrations":[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc","CheckSum":"xxx"}]}`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `{"error":"Invalid request, please see documentation for valid JSON payload"}`, strings.TrimSpace(w.Body.String())) } -func TestServerTenantMethodNotAllowed(t *testing.T) { +func TestMigrationsPostRouteCheckSumError(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - httpMethods := []string{http.MethodHead, http.MethodPut, http.MethodPatch, http.MethodDelete, http.MethodConnect, http.MethodOptions, http.MethodTrace} + router := SetupRouter(config, newMockedErrorCoordinator(0)) - for _, httpMethod := range httpMethods { - req, _ := newTestRequest(httpMethod, "http://example.com/tenants", nil) + json := []byte(`{"mode": "apply", "response": "full"}`) + req, _ := newTestRequest(http.MethodPost, "/migrations", bytes.NewBuffer(json)) - w := httptest.NewRecorder() - handler := makeHandler(tenantsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) - - assert.Equal(t, http.StatusMethodNotAllowed, w.Code) - } + w := httptest.NewRecorder() + router.ServeHTTP(w, req) + assert.Equal(t, http.StatusFailedDependency, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Contains(t, strings.TrimSpace(w.Body.String()), `"error":"Checksum verification failed. 
Please review offending migrations."`) } -// section: /diskMigrations +// section /tenants -func TestServerDiskMigrationsGet(t *testing.T) { +func TestTenantsGetRoute(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - req, _ := newTestRequest(http.MethodGet, "http://example.com/diskMigrations", nil) + router := SetupRouter(config, newMockedCoordinator) w := httptest.NewRecorder() - handler := makeHandler(diskMigrationsHandler, config, nil, newMockedDiskLoader) - handler(w, req) + req, _ := newTestRequest("GET", "/tenants", nil) + router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc","CheckSum":""},{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":2,"Contents":"select def","CheckSum":""}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `["a","b","c"]`, strings.TrimSpace(w.Body.String())) } -func TestServerDiskMigrationsMethodNotAllowed(t *testing.T) { +func TestTenantsPostRoute(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - httpMethods := []string{http.MethodHead, http.MethodPost, http.MethodPut, http.MethodPatch, http.MethodDelete, http.MethodConnect, http.MethodOptions, http.MethodTrace} - - for _, httpMethod := range httpMethods { - - req, _ := newTestRequest(httpMethod, "http://example.com/diskMigrations", nil) - - w := httptest.NewRecorder() - handler := makeHandler(diskMigrationsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) + router := SetupRouter(config, newMockedCoordinator) - assert.Equal(t, http.StatusMethodNotAllowed, w.Code) - } - -} - -// section: /migrations - -func TestServerMigrationsGet(t *testing.T) { 
- config, err := config.FromFile(configFile) - assert.Nil(t, err) - - req, _ := newTestRequest(http.MethodGet, "http://example.com/migrations", nil) + json := []byte(`{"name": "new_tenant", "response": "full", "mode":"dry-run"}`) + req, _ := newTestRequest(http.MethodPost, "/tenants", bytes.NewBuffer(json)) w := httptest.NewRecorder() - handler := makeHandler(migrationsHandler, config, newMockedConnector, nil) - handler(w, req) + router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"","CheckSum":"","Schema":"source","Created":"2016-02-22T16:41:01.000000123Z"}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Contains(t, strings.TrimSpace(w.Body.String()), `[{"name":"201602220001.sql","sourceDir":"source","file":"source/201602220001.sql","migrationType":2,"contents":"select def","checkSum":""}]`) } -func TestServerMigrationsPost(t *testing.T) { +func TestTenantsPostRouteSummaryResponse(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - req, _ := newTestRequest(http.MethodPost, "http://example.com/migrations", nil) + router := SetupRouter(config, newMockedCoordinator) + + json := []byte(`{"name": "new_tenant", "response": "summary", "mode":"dry-run"}`) + req, _ := newTestRequest(http.MethodPost, "/tenants", bytes.NewBuffer(json)) w := httptest.NewRecorder() - handler := makeHandler(migrationsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) + router.ServeHTTP(w, req) assert.Equal(t, http.StatusOK, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `[{"Name":"201602220001.sql","SourceDir":"source","File":"source/201602220001.sql","MigrationType":2,"Contents":"select 
def","CheckSum":""}]`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Contains(t, strings.TrimSpace(w.Body.String()), `"results":`) + assert.NotContains(t, strings.TrimSpace(w.Body.String()), `[{"name":"201602220001.sql","sourceDir":"source","file":"source/201602220001.sql","migrationType":2,"contents":"select def","checkSum":""}]`) } -func TestServerMigrationsPostFailedDependency(t *testing.T) { +func TestTenantsPostRouteBadRequestError(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - req, _ := newTestRequest(http.MethodPost, "http://example.com/migrations", nil) + router := SetupRouter(config, newMockedCoordinator) + + json := []byte(`{"a": "new_tenant"}`) + req, _ := newTestRequest(http.MethodPost, "/tenants", bytes.NewBuffer(json)) w := httptest.NewRecorder() - handler := makeHandler(migrationsHandler, config, newMockedConnector, newBrokenCheckSumMockedDiskLoader) - handler(w, req) + router.ServeHTTP(w, req) - assert.Equal(t, http.StatusFailedDependency, w.Code) - assert.Equal(t, "application/json", w.HeaderMap["Content-Type"][0]) - assert.Equal(t, `{"ErrorMessage":"Checksum verification failed. 
Please review offending migrations.","OffendingMigrations":[{"Name":"201602220000.sql","SourceDir":"source","File":"source/201602220000.sql","MigrationType":1,"Contents":"select abc","CheckSum":"xxx"}]}`, strings.TrimSpace(w.Body.String())) + assert.Equal(t, http.StatusBadRequest, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `{"error":"Invalid request, please see documentation for valid JSON payload"}`, strings.TrimSpace(w.Body.String())) } -func TestServerMigrationsMethodNotAllowed(t *testing.T) { +func TestTenantsPostRouteCheckSumError(t *testing.T) { config, err := config.FromFile(configFile) assert.Nil(t, err) - httpMethods := []string{http.MethodHead, http.MethodPut, http.MethodPatch, http.MethodDelete, http.MethodConnect, http.MethodOptions, http.MethodTrace} - - for _, httpMethod := range httpMethods { - req, _ := newTestRequest(httpMethod, "http://example.com/migrations", nil) + router := SetupRouter(config, newMockedErrorCoordinator(0)) - w := httptest.NewRecorder() - handler := makeHandler(migrationsHandler, config, newMockedConnector, newMockedDiskLoader) - handler(w, req) + json := []byte(`{"name": "new_tenant", "response": "full", "mode":"dry-run"}`) + req, _ := newTestRequest(http.MethodPost, "/tenants", bytes.NewBuffer(json)) - assert.Equal(t, http.StatusMethodNotAllowed, w.Code) - } + w := httptest.NewRecorder() + router.ServeHTTP(w, req) + assert.Equal(t, http.StatusFailedDependency, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Contains(t, strings.TrimSpace(w.Body.String()), `"error":"Checksum verification failed. 
Please review offending migrations."`) } -func TestServerInternalServerErrors(t *testing.T) { - c, err := config.FromFile(configFile) +func TestRouteError(t *testing.T) { + config, err := config.FromFile(configFile) assert.Nil(t, err) - requests := []struct { - method string - path string - handler func(http.ResponseWriter, *http.Request, *config.Config, func(*config.Config) (db.Connector, error), func(*config.Config) loader.Loader) - createConnector func(config *config.Config) (db.Connector, error) - createLoader func(config *config.Config) loader.Loader - payload io.Reader - }{{http.MethodGet, "tenants", tenantsHandler, newConnectorReturnError, newMockedDiskLoader, nil}, - {http.MethodGet, "tenants", tenantsHandler, newMockedErrorConnector(0), newMockedDiskLoader, nil}, - {http.MethodGet, "tenants", tenantsHandler, newMockedErrorConnector(1), newMockedDiskLoader, nil}, - {http.MethodPost, "tenants", tenantsHandler, newMockedErrorConnector(0), newMockedDiskLoader, bytes.NewBuffer([]byte(`{"name": "new_tenant"}`))}, - {http.MethodPost, "tenants", tenantsHandler, newMockedErrorConnector(1), newMockedDiskLoader, bytes.NewBuffer([]byte(`{"name": "new_tenant"}`))}, - {http.MethodPost, "tenants", tenantsHandler, newMockedErrorConnector(1), newMockedErrorDiskLoader(0), bytes.NewBuffer([]byte(`{"name": "new_tenant"}`))}, - {http.MethodPost, "tenants", tenantsHandler, newMockedErrorConnector(2), newMockedDiskLoader, bytes.NewBuffer([]byte(`{"name": "new_tenant"}`))}, - {http.MethodGet, "migrations", migrationsHandler, newConnectorReturnError, newMockedDiskLoader, nil}, - {http.MethodGet, "migrations", migrationsHandler, newMockedErrorConnector(1), newMockedDiskLoader, nil}, - {http.MethodPost, "migrations", migrationsHandler, newMockedErrorConnector(0), newMockedDiskLoader, nil}, - {http.MethodPost, "migrations", migrationsHandler, newMockedErrorConnector(1), newMockedDiskLoader, nil}, - {http.MethodPost, "migrations", migrationsHandler, newMockedErrorConnector(1), 
newMockedErrorDiskLoader(0), nil}, - {http.MethodPost, "migrations", migrationsHandler, newMockedErrorConnector(2), newMockedDiskLoader, nil}, - {http.MethodGet, "diskMigrations", diskMigrationsHandler, newMockedConnector, newMockedErrorDiskLoader(0), nil}} - - for _, r := range requests { - req, _ := newTestRequest(r.method, fmt.Sprintf("http://example.com/%v", r.path), r.payload) - - w := httptest.NewRecorder() - handler := makeHandler(r.handler, c, r.createConnector, r.createLoader) - handler(w, req) - - assert.Equal(t, http.StatusInternalServerError, w.Code) - } -} - -func TestTracing(t *testing.T) { - r, _ := newTestRequest(http.MethodGet, "http://example.com/sdsdf", nil) + router := SetupRouter(config, newMockedErrorCoordinator(0)) w := httptest.NewRecorder() - handler := tracing()(http.NotFoundHandler()) - handler.ServeHTTP(w, r) + req, _ := newTestRequest("GET", "/migrations/source", nil) + router.ServeHTTP(w, req) - assert.Equal(t, http.StatusNotFound, w.Code) + assert.Equal(t, http.StatusInternalServerError, w.Code) + assert.Equal(t, "application/json; charset=utf-8", w.HeaderMap["Content-Type"][0]) + assert.Equal(t, `{"error":"Mocked Error Disk Loader: threshold 0 reached"}`, strings.TrimSpace(w.Body.String())) } diff --git a/setup.sh b/setup.sh index 164467e..2652735 100755 --- a/setup.sh +++ b/setup.sh @@ -1,3 +1,7 @@ #!/usr/bin/env sh -go get -t -v ./... +# this is for dockerhub failing on fetching packages from gopkg.in +# travis resolves this by 3 retries so adapting 3 retries here as well +for i in {1..3}; do + go get -t -v ./... 
&& break || sleep 15; +done diff --git a/test/migrator-mssql.yaml b/test/migrator-mssql.yaml index 5deb59d..571847b 100644 --- a/test/migrator-mssql.yaml +++ b/test/migrator-mssql.yaml @@ -2,7 +2,6 @@ baseDir: test/migrations driver: sqlserver dataSource: "A" singleMigrations: - - public - ref - config tenantMigrations: diff --git a/test/migrator-mysql.yaml b/test/migrator-mysql.yaml index 5c3d323..ef3d6f6 100644 --- a/test/migrator-mysql.yaml +++ b/test/migrator-mysql.yaml @@ -2,7 +2,6 @@ baseDir: test/migrations driver: mysql dataSource: "A" singleMigrations: - - public - ref - config tenantMigrations: diff --git a/test/migrator-mysql.yaml.travis b/test/migrator-mysql.yaml.travis index e53cbb6..6775d41 100644 --- a/test/migrator-mysql.yaml.travis +++ b/test/migrator-mysql.yaml.travis @@ -2,7 +2,6 @@ baseDir: test/migrations driver: mysql dataSource: "root:@tcp(127.0.0.1:3306)/migrator_test?parseTime=true&timeout=1s" singleMigrations: - - public - ref - config tenantMigrations: diff --git a/test/migrator-postgresql.yaml b/test/migrator-postgresql.yaml index 53e0edb..0d905f8 100644 --- a/test/migrator-postgresql.yaml +++ b/test/migrator-postgresql.yaml @@ -2,7 +2,6 @@ baseDir: test/migrations driver: postgres dataSource: "user=postgres dbname=A host=B port=C sslmode=disable connect_timeout=1" singleMigrations: - - public - ref - config tenantMigrations: diff --git a/test/migrator-postgresql.yaml.travis b/test/migrator-postgresql.yaml.travis index dfe8f02..675871e 100644 --- a/test/migrator-postgresql.yaml.travis +++ b/test/migrator-postgresql.yaml.travis @@ -3,7 +3,6 @@ baseDir: test/migrations driver: postgres dataSource: "user=postgres dbname=migrator_test host=127.0.0.1 port=5432 sslmode=disable connect_timeout=1" singleMigrations: - - public - ref - config tenantMigrations: diff --git a/test/migrator-test-envs.yaml b/test/migrator-test-envs.yaml index 356501f..efe4f50 100644 --- a/test/migrator-test-envs.yaml +++ b/test/migrator-test-envs.yaml @@ -14,6 +14,5 @@ 
singleMigrations: tenantMigrations: - tenants webHookURL: ${SHLVL} -webHookTemplate: ${TERM} webHookHeaders: - "X-Security-Token: ${USER}" diff --git a/test/migrator-test.yaml b/test/migrator-test.yaml index e38d179..03aa39b 100644 --- a/test/migrator-test.yaml +++ b/test/migrator-test.yaml @@ -13,7 +13,6 @@ singleMigrations: tenantMigrations: - tenants webHookURL: https://slack.com/api/api.test -webHookTemplate: "{\"text\": \"{text}\",\"icon_emoji\": \":white_check_mark:\"}" webHookHeaders: - "Authorization: Basic QWxhZGRpbjpPcGVuU2VzYW1l" - "Content-Type: application/json" diff --git a/types/types.go b/types/types.go index 7076183..65e5c62 100644 --- a/types/types.go +++ b/types/types.go @@ -2,6 +2,8 @@ package types import ( "time" + + "github.com/go-playground/validator" ) // MigrationType stores information about type of migration @@ -18,19 +20,74 @@ const ( MigrationTypeTenantScript MigrationType = 4 ) +// MigrationsResponseType represents type of response either full or summary +type MigrationsResponseType string + +const ( + // ResponseTypeSummary instructs migrator to only return JSON representation of Results struct + ResponseTypeSummary MigrationsResponseType = "summary" + // ResponseTypeFull instructs migrator to return JSON representation of both Results struct and all applied migrations + ResponseTypeFull MigrationsResponseType = "full" +) + +// MigrationsModeType represents mode in which migrations should be applied +type MigrationsModeType string + +const ( + // ModeTypeApply instructs migrator to apply migrations + ModeTypeApply MigrationsModeType = "apply" + // ModeTypeDryRun instructs migrator to perform apply operation in dry-run mode, instead of committing transaction it is rollbacked + ModeTypeDryRun MigrationsModeType = "dry-run" + // ModeTypeSync instructs migrator to only synchronise migrations + ModeTypeSync MigrationsModeType = "sync" +) + +// ValidateMigrationsModeType validates MigrationsModeType used by binding package +func 
ValidateMigrationsModeType(fl validator.FieldLevel) bool { + mode, ok := fl.Field().Interface().(MigrationsModeType) + if ok { + return mode == ModeTypeApply || mode == ModeTypeSync || mode == ModeTypeDryRun + } + return false +} + +// ValidateMigrationsResponseType validates MigrationsResponseType used by binding package +func ValidateMigrationsResponseType(fl validator.FieldLevel) bool { + response, ok := fl.Field().Interface().(MigrationsResponseType) + if ok { + return response == ResponseTypeSummary || response == ResponseTypeFull + } + return false +} + // Migration contains basic information about migration type Migration struct { - Name string - SourceDir string - File string - MigrationType MigrationType - Contents string - CheckSum string + Name string `json:"name"` + SourceDir string `json:"sourceDir"` + File string `json:"file"` + MigrationType MigrationType `json:"migrationType"` + Contents string `json:"contents"` + CheckSum string `json:"checkSum"` } // MigrationDB embeds Migration and adds DB-specific fields type MigrationDB struct { Migration - Schema string - Created time.Time + Schema string `json:"schema"` + AppliedAt time.Time `json:"appliedAt"` +} + +// MigrationResults contains summary information about executed migrations +type MigrationResults struct { + StartedAt time.Time `json:"startedAt"` + Duration time.Duration `json:"duration"` + Tenants int `json:"tenants"` + SingleMigrations int `json:"singleMigrations"` + TenantMigrations int `json:"tenantMigrations"` + TenantMigrationsTotal int `json:"tenantMigrationsTotal"` // tenant migrations for all tenants + MigrationsGrandTotal int `json:"migrationsGrandTotal"` // total number of all migrations applied + SingleScripts int `json:"singleScripts"` + TenantScripts int `json:"tenantScripts"` + TenantScriptsTotal int `json:"tenantScriptsTotal"` // tenant scripts for all tenants + ScriptsGrandTotal int `json:"scriptsGrandTotal"` // total number of all scripts applied } diff --git a/utils/utils.go 
b/utils/utils.go deleted file mode 100644 index e975959..0000000 --- a/utils/utils.go +++ /dev/null @@ -1,63 +0,0 @@ -package utils - -import ( - "bytes" - "fmt" - "io" - "text/tabwriter" - - "github.com/lukaszbudnik/migrator/types" -) - -// MigrationArrayToString creates a string representation of Migration array -func MigrationArrayToString(migrations []types.Migration) string { - buffer := new(bytes.Buffer) - w := tabwriter.NewWriter(buffer, 0, 0, 1, ' ', tabwriter.Debug) - - fmt.Fprintf(w, "%v \t %v \t %v \t %v \t %v", "SourceDir", "Name", "File", "Type", "CheckSum") - - for _, m := range migrations { - formatMigration(w, &m) - } - - w.Flush() - return buffer.String() -} - -func formatMigration(w io.Writer, m *types.Migration) { - fmt.Fprintf(w, "\n%v \t %v \t %v \t %v \t %v", m.SourceDir, m.Name, m.File, m.MigrationType, m.CheckSum) -} - -// MigrationDBArrayToString creates a string representation of MigrationDB array -func MigrationDBArrayToString(migrations []types.MigrationDB) string { - buffer := new(bytes.Buffer) - w := tabwriter.NewWriter(buffer, 0, 0, 1, ' ', tabwriter.Debug) - - fmt.Fprintf(w, "%v \t %v \t %v \t %v \t %v \t %v \t %v", "SourceDir", "Name", "File", "Schema", "Created", "Type", "CheckSum") - - for _, m := range migrations { - formatMigrationDB(w, &m) - } - - w.Flush() - return buffer.String() -} - -func formatMigrationDB(w io.Writer, m *types.MigrationDB) { - fmt.Fprintf(w, "\n%v \t %v \t %v \t %v \t %v \t %v \t %v", m.SourceDir, m.Name, m.File, m.Schema, m.Created, m.MigrationType, m.CheckSum) -} - -// TenantArrayToString creates a string representation of Tenant array -func TenantArrayToString(dbTenants []string) string { - var buffer bytes.Buffer - - buffer.WriteString("Name") - - for _, t := range dbTenants { - buffer.WriteString("\n") - buffer.WriteString(t) - } - - return buffer.String() - -} diff --git a/utils/utils_test.go b/utils/utils_test.go deleted file mode 100644 index 5db3a1d..0000000 --- a/utils/utils_test.go +++ /dev/null 
@@ -1,72 +0,0 @@ -package utils - -import ( - "testing" - "time" - - "github.com/lukaszbudnik/migrator/types" - "github.com/stretchr/testify/assert" -) - -func TestTenantArrayToString(t *testing.T) { - dbTenants := []string{"abcabc", "dedededededededededede", "opopopop"} - expected := `Name -abcabc -dedededededededededede -opopopop` - - actual := TenantArrayToString(dbTenants) - - assert.Equal(t, expected, actual) -} - -func TestMigrationArrayToString(t *testing.T) { - - m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, CheckSum: "abc...123..."} - m2 := types.Migration{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantMigration, CheckSum: "abc...123..."} - m3 := types.Migration{Name: "201602220002.sql", SourceDir: "tenants", File: "tenants/201602220002.sql", MigrationType: types.MigrationTypeTenantMigration, CheckSum: "abc...123..."} - var ms = []types.Migration{m1, m2, m3} - - expected := `SourceDir | Name | File | Type | CheckSum -source | 201602220000.sql | source/201602220000.sql | 1 | abc...123... -tenants | 201602220001.sql | tenants/201602220001.sql | 2 | abc...123... 
-tenants | 201602220002.sql | tenants/201602220002.sql | 2 | abc...123...` - actual := MigrationArrayToString(ms) - - assert.Equal(t, expected, actual) -} - -func TestMigrationArrayToStringEmpty(t *testing.T) { - - var ms = []types.Migration{} - - expected := `SourceDir | Name | File | Type | CheckSum` - actual := MigrationArrayToString(ms) - - assert.Equal(t, expected, actual) -} - -func TestMigrationDBArrayToString(t *testing.T) { - m1 := types.Migration{Name: "201602220000.sql", SourceDir: "source", File: "source/201602220000.sql", MigrationType: types.MigrationTypeSingleMigration, CheckSum: "abc"} - m2 := types.Migration{Name: "201602220001.sql", SourceDir: "tenants", File: "tenants/201602220001.sql", MigrationType: types.MigrationTypeTenantMigration, CheckSum: "def"} - d1 := time.Date(2016, 02, 22, 16, 41, 1, 123, time.UTC) - d2 := time.Date(2016, 02, 22, 16, 41, 2, 456, time.UTC) - var ms = []types.MigrationDB{{Migration: m1, Schema: "source", Created: d1}, {Migration: m2, Schema: "abc", Created: d2}, {Migration: m2, Schema: "def", Created: d2}} - - expected := `SourceDir | Name | File | Schema | Created | Type | CheckSum -source | 201602220000.sql | source/201602220000.sql | source | 2016-02-22 16:41:01.000000123 +0000 UTC | 1 | abc -tenants | 201602220001.sql | tenants/201602220001.sql | abc | 2016-02-22 16:41:02.000000456 +0000 UTC | 2 | def -tenants | 201602220001.sql | tenants/201602220001.sql | def | 2016-02-22 16:41:02.000000456 +0000 UTC | 2 | def` - actual := MigrationDBArrayToString(ms) - - assert.Equal(t, expected, actual) -} - -func TestMigrationDBArrayToStringEmpty(t *testing.T) { - var ms = []types.MigrationDB{} - - expected := `SourceDir | Name | File | Schema | Created | Type | CheckSum` - actual := MigrationDBArrayToString(ms) - - assert.Equal(t, expected, actual) -}