From 6d17e8cbc4170cbf843efe6e59bd5531cb62eeb4 Mon Sep 17 00:00:00 2001 From: Ben Mildren Date: Tue, 7 Jul 2020 11:17:22 +0100 Subject: [PATCH] initial commit (#1) * Migrated existing modules (and docs fragment) from community.general * Added basic role * Added integration tests for modules * Implemented git actions workflows --- .github/workflows/ansible-test-plugins.yml | 94 +++ .github/workflows/ansible-test-roles.yml | 56 ++ .github/workflows/ansible-test.yml | 59 -- .gitignore | 3 + README.md | 68 +- galaxy.yml | 16 + plugins/README.md | 31 + plugins/doc_fragments/proxysql.py | 61 ++ plugins/module_utils/mysql.py | 110 +++ plugins/modules/proxysql_backend_servers.py | 518 ++++++++++++++ plugins/modules/proxysql_global_variables.py | 269 ++++++++ plugins/modules/proxysql_manage_config.py | 218 ++++++ plugins/modules/proxysql_mysql_users.py | 481 +++++++++++++ plugins/modules/proxysql_query_rules.py | 632 ++++++++++++++++++ .../proxysql_replication_hostgroups.py | 381 +++++++++++ plugins/modules/proxysql_scheduler.py | 424 ++++++++++++ roles/proxysql/.yamllint | 11 + roles/proxysql/README.md | 43 ++ roles/proxysql/defaults/main.yml | 171 +++++ roles/proxysql/handlers/main.yml | 33 + roles/proxysql/meta/main.yml | 20 + roles/proxysql/molecule/default/converge.yml | 5 + roles/proxysql/molecule/default/molecule.yml | 46 ++ roles/proxysql/molecule/default/prepare.yml | 20 + .../molecule/default/tests/test_default.py | 81 +++ roles/proxysql/tasks/config.yml | 32 + roles/proxysql/tasks/install.yml | 90 +++ roles/proxysql/tasks/main.yml | 22 + roles/proxysql/tasks/setvars.yml | 8 + roles/proxysql/tasks/users.yml | 16 + roles/proxysql/templates/client.my.cnf.j2 | 6 + roles/proxysql/templates/proxysql.cnf.j2 | 21 + roles/proxysql/vars/main.yml | 385 +++++++++++ .../targets/setup_proxysql/defaults/main.yml | 5 + .../targets/setup_proxysql/tasks/config.yml | 12 + .../targets/setup_proxysql/tasks/install.yml | 28 + .../targets/setup_proxysql/tasks/main.yml | 3 + .../targets/setup_proxysql/vars/main.yml | 13 + .../defaults/main.yml | 8 + .../meta/main.yml | 3 + .../tasks/base_test.yml | 57 ++ .../tasks/cleanup_test_servers.yml | 12 + .../tasks/main.yml | 83 +++ .../tasks/setup_test_server.yml | 12 + .../tasks/teardown.yml | 6 + .../tasks/test_create_backend_server.yml | 31 + ...t_create_backend_server_in_memory_only.yml | 31 + ...te_backend_server_with_delayed_persist.yml | 31 + .../tasks/test_create_using_check_mode.yml | 30 + .../tasks/test_delete_backend_server.yml | 31 + ...t_delete_backend_server_in_memory_only.yml | 31 + ...te_backend_server_with_delayed_persist.yml | 31 + .../tasks/test_delete_using_check_mode.yml | 30 + .../defaults/main.yml | 8 + .../meta/main.yml | 3 + .../tasks/base_test.yml | 54 ++ .../tasks/cleanup_global_variables.yml | 12 + .../tasks/main.yml | 48 ++ .../tasks/setup_global_variables.yml | 12 + .../tasks/setvars.yml | 12 + .../tasks/teardown.yml | 6 + .../tasks/test_update_variable_value.yml | 31 + ...t_update_variable_value_in_memory_only.yml | 31 + ...update_variable_value_using_check_mode.yml | 30 + ...te_variable_value_with_delayed_persist.yml | 31 + .../defaults/main.yml | 8 + .../test_proxysql_mysql_users/meta/main.yml | 3 + .../tasks/base_test.yml | 57 ++ .../tasks/cleanup_test_users.yml | 12 + .../test_proxysql_mysql_users/tasks/main.yml | 83 +++ .../tasks/setup_test_user.yml | 12 + .../tasks/teardown.yml | 6 + .../tasks/test_create_mysql_user.yml | 31 + .../test_create_mysql_user_in_memory_only.yml | 31 + ...create_mysql_user_with_delayed_persist.yml | 31 + 
.../tasks/test_create_using_check_mode.yml | 30 + .../tasks/test_delete_mysql_user.yml | 31 + .../test_delete_mysql_user_in_memory_only.yml | 31 + ...delete_mysql_user_with_delayed_persist.yml | 31 + .../tasks/test_delete_using_check_mode.yml | 30 + .../defaults/main.yml | 12 + .../test_proxysql_query_rules/meta/main.yml | 3 + .../tasks/base_test.yml | 61 ++ .../tasks/cleanup_test_query_rules.yml | 12 + .../test_proxysql_query_rules/tasks/main.yml | 83 +++ .../tasks/setup_test_query_rule.yml | 12 + .../tasks/teardown.yml | 6 + .../tasks/test_create_query_rule.yml | 31 + .../test_create_query_rule_in_memory_only.yml | 31 + ...create_query_rule_with_delayed_persist.yml | 31 + .../tasks/test_create_using_check_mode.yml | 30 + .../tasks/test_delete_query_rule.yml | 31 + .../test_delete_query_rule_in_memory_only.yml | 30 + ...delete_query_rule_with_delayed_persist.yml | 31 + .../tasks/test_delete_using_check_mode.yml | 30 + .../defaults/main.yml | 9 + .../meta/main.yml | 3 + .../tasks/base_test.yml | 58 ++ .../cleanup_test_replication_hostgroups.yml | 12 + .../tasks/main.yml | 83 +++ .../setup_test_replication_hostgroups.yml | 12 + .../tasks/teardown.yml | 6 + .../test_create_replication_hostgroups.yml | 31 + ..._replication_hostgroups_in_memory_only.yml | 31 + ...cation_hostgroups_with_delayed_persist.yml | 31 + .../tasks/test_create_using_check_mode.yml | 30 + .../test_delete_replication_hostgroups.yml | 31 + ..._replication_hostgroups_in_memory_only.yml | 30 + ...cation_hostgroups_with_delayed_persist.yml | 31 + .../tasks/test_delete_using_check_mode.yml | 30 + .../test_proxysql_scheduler/defaults/main.yml | 9 + .../test_proxysql_scheduler/meta/main.yml | 3 + .../tasks/base_test.yml | 58 ++ .../tasks/cleanup_test_schedulers.yml | 12 + .../test_proxysql_scheduler/tasks/main.yml | 83 +++ .../tasks/setup_test_scheduler.yml | 12 + .../tasks/teardown.yml | 6 + .../tasks/test_create_scheduler.yml | 31 + .../test_create_scheduler_in_memory_only.yml | 31 + ..._create_scheduler_with_delayed_persist.yml | 31 + .../tasks/test_create_using_check_mode.yml | 30 + .../tasks/test_delete_scheduler.yml | 31 + .../test_delete_scheduler_in_memory_only.yml | 30 + ..._delete_scheduler_with_delayed_persist.yml | 31 + .../tasks/test_delete_using_check_mode.yml | 30 + tests/sanity/ignore-2.10.txt | 2 + tests/sanity/ignore-2.11.txt | 2 + tests/sanity/ignore-2.9.txt | 2 + 128 files changed, 6826 insertions(+), 94 deletions(-) create mode 100644 .github/workflows/ansible-test-plugins.yml create mode 100644 .github/workflows/ansible-test-roles.yml delete mode 100644 .github/workflows/ansible-test.yml create mode 100644 galaxy.yml create mode 100644 plugins/README.md create mode 100644 plugins/doc_fragments/proxysql.py create mode 100644 plugins/module_utils/mysql.py create mode 100644 plugins/modules/proxysql_backend_servers.py create mode 100644 plugins/modules/proxysql_global_variables.py create mode 100644 plugins/modules/proxysql_manage_config.py create mode 100644 plugins/modules/proxysql_mysql_users.py create mode 100644 plugins/modules/proxysql_query_rules.py create mode 100644 plugins/modules/proxysql_replication_hostgroups.py create mode 100644 plugins/modules/proxysql_scheduler.py create mode 100644 roles/proxysql/.yamllint create mode 100644 roles/proxysql/README.md create mode 100644 roles/proxysql/defaults/main.yml create mode 100644 roles/proxysql/handlers/main.yml create mode 100644 roles/proxysql/meta/main.yml create mode 100644 roles/proxysql/molecule/default/converge.yml create mode 100644 
roles/proxysql/molecule/default/molecule.yml create mode 100644 roles/proxysql/molecule/default/prepare.yml create mode 100644 roles/proxysql/molecule/default/tests/test_default.py create mode 100644 roles/proxysql/tasks/config.yml create mode 100644 roles/proxysql/tasks/install.yml create mode 100644 roles/proxysql/tasks/main.yml create mode 100644 roles/proxysql/tasks/setvars.yml create mode 100644 roles/proxysql/tasks/users.yml create mode 100644 roles/proxysql/templates/client.my.cnf.j2 create mode 100644 roles/proxysql/templates/proxysql.cnf.j2 create mode 100644 roles/proxysql/vars/main.yml create mode 100644 tests/integration/targets/setup_proxysql/defaults/main.yml create mode 100644 tests/integration/targets/setup_proxysql/tasks/config.yml create mode 100644 tests/integration/targets/setup_proxysql/tasks/install.yml create mode 100644 tests/integration/targets/setup_proxysql/tasks/main.yml create mode 100644 tests/integration/targets/setup_proxysql/vars/main.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/meta/main.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/defaults/main.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/meta/main.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/main.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml create mode 100644 
tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/meta/main.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/defaults/main.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/meta/main.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/main.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml create mode 100644 
tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/defaults/main.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/meta/main.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/main.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/teardown.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml create mode 100644 tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml create mode 100644 tests/sanity/ignore-2.10.txt create mode 100644 tests/sanity/ignore-2.11.txt create mode 100644 tests/sanity/ignore-2.9.txt diff --git a/.github/workflows/ansible-test-plugins.yml 
b/.github/workflows/ansible-test-plugins.yml new file mode 100644 index 0000000..5afa0ec --- /dev/null +++ b/.github/workflows/ansible-test-plugins.yml @@ -0,0 +1,94 @@ +name: Plugins CI +on: + push: + paths: + - 'plugins/**' + - 'tests/**' + - '.github/workflows/ansible-test.yml' + pull_request: + paths: + - 'plugins/**' + - 'tests/**' + - '.github/workflows/ansible-test.yml' + schedule: + - cron: '0 6 * * *' + + +env: + proxysql_version_file: "./ansible_collections/community/proxysql/tests/integration/targets/setup_proxysql/defaults/main.yml" + +jobs: + sanity: + name: "Sanity (Python: ${{ matrix.python }}, Ansible: ${{ matrix.ansible }})" + runs-on: ubuntu-latest + strategy: + matrix: + ansible: + - stable-2.9 + - stable-2.10 + - devel + python: + - 2.7 + - 3.8 + steps: + + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/proxysql + + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Run sanity tests + run: ansible-test sanity --docker -v --color + working-directory: ./ansible_collections/community/proxysql + + integration: + name: "Integration (Python: ${{ matrix.python }}, Ansible: ${{ matrix.ansible }}, ProxySQL: ${{ matrix.proxysql }})" + runs-on: ubuntu-latest + strategy: + fail-fast: false + matrix: + proxysql: + - 2.0.12 + ansible: + - stable-2.9 + - stable-2.10 + - devel + python: + - 3.6 + steps: + + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/proxysql + + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Set ProxySQL version (${{ matrix.proxysql }}) + run: "sed -i 's/^proxysql_version:.*/proxysql_version: \"${{ matrix.proxysql }}\"/g' ${{ env.proxysql_version_file }}" + + - name: Run integration tests + run: ansible-test integration --docker -v --color --retry-on-error --continue-on-error --python ${{ matrix.python }} --diff --coverage + working-directory: ./ansible_collections/community/proxysql + + - name: Generate coverage report. 
+ run: ansible-test coverage xml -v --requirements --group-by command --group-by version + working-directory: ./ansible_collections/community/proxysql + + - uses: codecov/codecov-action@v1 + with: + fail_ci_if_error: false diff --git a/.github/workflows/ansible-test-roles.yml b/.github/workflows/ansible-test-roles.yml new file mode 100644 index 0000000..796b33d --- /dev/null +++ b/.github/workflows/ansible-test-roles.yml @@ -0,0 +1,56 @@ +name: Roles CI +on: + push: + paths: + - 'roles/**' + - '.github/workflows/ansible-test-roles.yml' + pull_request: + paths: + - 'roles/**' + - '.github/workflows/ansible-test-roles.yml' + schedule: + - cron: '0 6 * * *' + +jobs: + molecule: + name: "Molecule (Python: ${{ matrix.python }}, Ansible: ${{ matrix.ansible }}, ProxySQL: ${{ matrix.proxysql }})" + runs-on: ubuntu-latest + env: + PY_COLORS: 1 + ANSIBLE_FORCE_COLOR: 1 + strategy: + matrix: + proxysql: + - 2.0.12 + ansible: + - stable-2.9 + ### it looks like there's errors for 2.10+ with ansible-lint (https://github.com/ansible/ansible-lint/pull/878) + ### and molecule (_maybe_ relating to https://github.com/ansible-community/molecule/pull/2547) + # - stable-2.10 + # - devel + python: + - 2.7 + - 3.8 + + steps: + + - name: Check out code + uses: actions/checkout@v2 + with: + path: ansible_collections/community/proxysql + + - name: Set up Python ${{ matrix.python }} + uses: actions/setup-python@v2 + with: + python-version: ${{ matrix.python }} + + - name: Install ansible-base (${{ matrix.ansible }}) + run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check + + - name: Install molecule and related dependencies + run: | + pip install ansible-lint docker flake8 molecule testinfra yamllint + + - name: Run molecule default test scenario + run: for d in roles/*/; do (cd "$d" && molecule --version && molecule test) done + working-directory: ./ansible_collections/community/proxysql diff --git a/.github/workflows/ansible-test.yml b/.github/workflows/ansible-test.yml deleted file mode 100644 index c7bfbfa..0000000 --- a/.github/workflows/ansible-test.yml +++ /dev/null @@ -1,59 +0,0 @@ -name: CI -on: -- pull_request - -jobs: - sanity: - name: Sanity (${{ matrix.ansible }}) - strategy: - matrix: - ansible: - - stable-2.10 - - devel - runs-on: ubuntu-latest - steps: - - - name: Check out code - uses: actions/checkout@v1 - with: - path: ansible_collections/community/proxysql - - - name: Set up Python 3.6 - uses: actions/setup-python@v1 - with: - python-version: 3.6 - - - name: Install ansible-base (${{ matrix.ansible }}) - run: pip install https://github.com/ansible/ansible/archive/${{ matrix.ansible }}.tar.gz --disable-pip-version-check - - - name: Run sanity tests - run: ansible-test sanity --docker -v --color --python 3.6 - -# integration: -# runs-on: ubuntu-latest -# strategy: -# matrix: -# python_version: ["3.6"] -# container: -# image: python:${{ matrix.python_version }}-alpine -# steps: -# -# - name: Check out code -# uses: actions/checkout@v1 -# with: -# path: ansible_collections/community/proxysql -# -# - name: Install ansible-base (stable-2.10) -# run: pip install https://github.com/ansible/ansible/archive/stable-2.10.tar.gz --disable-pip-version-check -# -# - name: Run integration tests on Python ${{ matrix.python_version }} -# run: ansible-test integration --docker -v --color --retry-on-error --python ${{ matrix.python_version }} --continue-on-error --diff --coverage -# -# - name: Generate coverage report. 
-# run: ansible-test coverage xml -v --requirements --group-by command --group-by version -# # FIXME ansible_collections/NAMESPACE/COLLECTION -# working-directory: ./ansible_collections/community/FIXME -# -# - uses: codecov/codecov-action@v1 -# with: -# fail_ci_if_error: false diff --git a/.gitignore b/.gitignore index 96a58bf..cb85741 100644 --- a/.gitignore +++ b/.gitignore @@ -129,3 +129,6 @@ dmypy.json # Pyre type checker .pyre/ + +# MacOS +.DS_Store diff --git a/README.md b/README.md index f9a7ff5..00a0cd2 100644 --- a/README.md +++ b/README.md @@ -1,58 +1,56 @@ -# collection_template -You can build a new repository for an Ansible Collection using this template by following [Creating a repository from a template](https://help.github.com/en/github/creating-cloning-and-archiving-repositories/creating-a-repository-from-a-template). This README.md contains recommended headings for your collection README.md, with comments describing what each section should contain. Once you have created your collection repository, delete this paragraph and the title above it from your README.md. +# ProxySQL collection for Ansible +[![Plugins CI](https://github.com/ansible-collections/community.proxysql/workflows/Plugins%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.proxysql/actions?query=workflow%3A"Plugins+CI") [![Roles CI](https://github.com/ansible-collections/community.proxysql/workflows/Roles%20CI/badge.svg?event=push)](https://github.com/ansible-collections/community.proxysql/actions?query=workflow%3A"Roles+CI") [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/community.proxysql)](https://codecov.io/gh/ansible-collections/community.proxysql) -# Foo Collection - -[![CI](https://github.com/ansible-collections/REPONAMEHERE/workflows/CI/badge.svg?event=push)](https://github.com/ansible-collections/REPONAMEHERE/actions) [![Codecov](https://img.shields.io/codecov/c/github/ansible-collections/REPONAMEHERE)](https://codecov.io/gh/ansible-collections/REPONAMEHERE) +## Included content - +- **Modules**: + - [proxysql_backend_servers](https://docs.ansible.com/ansible/latest/modules/proxysql_backend_servers_module.html) + - [proxysql_global_variables](https://docs.ansible.com/ansible/latest/modules/proxysql_global_variables_module.html) + - [proxysql_manage_config](https://docs.ansible.com/ansible/latest/modules/proxysql_manage_config_module.html) + - [proxysql_mysql_users](https://docs.ansible.com/ansible/latest/modules/proxysql_mysql_users_module.html) + - [proxysql_query_rules](https://docs.ansible.com/ansible/latest/modules/proxysql_query_rules_module.html) + - [proxysql_replication_hostgroups](https://docs.ansible.com/ansible/latest/modules/proxysql_replication_hostgroups_module.html) + - [proxysql_scheduler](https://docs.ansible.com/ansible/latest/modules/proxysql_scheduler_module.html) +- **Roles**: + - proxysql ## Tested with Ansible - +- 2.9 +- 2.10 +- devel ## External requirements - - -### Supported connections - - -## Included content +The ProxySQL modules rely on a MySQL connector. The list of supported drivers is below: - +- [PyMySQL](https://github.com/PyMySQL/PyMySQL) +- [MySQLdb](https://github.com/PyMySQL/mysqlclient-python) +- Support for other Python MySQL connectors may be added in a future release. ## Using this collection - - -See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. 
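+The modules in this collection talk to the ProxySQL admin interface. A
+minimal playbook sketch (the host names and credentials below are
+illustrative placeholders, not defaults shipped with the collection):
+
+```yaml
+---
+- name: Manage ProxySQL backend servers
+  hosts: proxysql
+  tasks:
+    - name: Register a backend in hostgroup 1
+      community.proxysql.proxysql_backend_servers:
+        login_user: admin
+        login_password: admin
+        hostgroup_id: 1
+        hostname: mysql01
+        state: present
+```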
- -## Contributing to this collection - - - +### Installing the Collection from Ansible Galaxy -## Release notes - +Before using the ProxySQL collection, you need to install it with the Ansible Galaxy CLI: -## Roadmap +```bash +ansible-galaxy collection install community.proxysql +``` - +You can also include it in a `requirements.yml` file and install it via `ansible-galaxy collection install -r requirements.yml`, using the format: -## More information +```yaml +--- +collections: + - name: community.proxysql + version: v0.1.0 +``` - - -- [Ansible Collection overview](https://github.com/ansible-collections/overview) -- [Ansible User guide](https://docs.ansible.com/ansible/latest/user_guide/index.html) -- [Ansible Developer guide](https://docs.ansible.com/ansible/latest/dev_guide/index.html) -- [Ansible Community code of conduct](https://docs.ansible.com/ansible/latest/community/code_of_conduct.html) +See [Ansible Using collections](https://docs.ansible.com/ansible/latest/user_guide/collections_using.html) for more details. ## Licensing - - GNU General Public License v3.0 or later. See [LICENSE](https://www.gnu.org/licenses/gpl-3.0.txt) to see the full text. diff --git a/galaxy.yml b/galaxy.yml new file mode 100644 index 0000000..f0d6f3b --- /dev/null +++ b/galaxy.yml @@ -0,0 +1,16 @@ +namespace: community +name: proxysql +version: 1.0.0 +readme: README.md +authors: + - Ben Mildren (@bmildren) +description: ProxySQL collection for Ansible +license_file: LICENSE +tags: + - database + - mysql + - proxysql +repository: https://github.com/ansible-collections/community.proxysql +documentation: https://github.com/ansible-collections/community.proxysql +homepage: https://github.com/ansible-collections/community.proxysql +issues: https://github.com/ansible-collections/community.proxysql/issues diff --git a/plugins/README.md b/plugins/README.md new file mode 100644 index 0000000..6541cf7 --- /dev/null +++ b/plugins/README.md @@ -0,0 +1,31 @@ +# Collections Plugins Directory + +This directory can be used to ship various plugins inside an Ansible collection. Each plugin is placed in a folder that +is named after the type of plugin it is in. It can also include the `module_utils` and `modules` directory that +would contain module utils and modules respectively. + +Here is an example directory of the majority of plugins currently supported by Ansible: + +``` +└── plugins + ├── action + ├── become + ├── cache + ├── callback + ├── cliconf + ├── connection + ├── filter + ├── httpapi + ├── inventory + ├── lookup + ├── module_utils + ├── modules + ├── netconf + ├── shell + ├── strategy + ├── terminal + ├── test + └── vars +``` + +A full list of plugin types can be found at [Working With Plugins](https://docs.ansible.com/ansible/2.9/plugins/plugins.html). \ No newline at end of file diff --git a/plugins/doc_fragments/proxysql.py b/plugins/doc_fragments/proxysql.py new file mode 100644 index 0000000..ae38b94 --- /dev/null +++ b/plugins/doc_fragments/proxysql.py @@ -0,0 +1,61 @@ +# -*- coding: utf-8 -*- + +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + + +class ModuleDocFragment(object): + + # Documentation fragment for ProxySQL connectivity + CONNECTIVITY = r''' +options: + login_user: + description: + - The username used to authenticate to ProxySQL admin interface. 
+ type: str + login_password: + description: + - The password used to authenticate to ProxySQL admin interface. + type: str + login_host: + description: + - The host used to connect to ProxySQL admin interface. + type: str + default: '127.0.0.1' + login_unix_socket: + description: + - The socket used to connect to ProxySQL admin interface. + type: str + login_port: + description: + - The port used to connect to ProxySQL admin interface. + type: int + default: 6032 + config_file: + description: + - Specify a config file from which I(login_user) and I(login_password) + are to be read. + type: path + default: '' +requirements: + - PyMySQL (Python 2.7 and Python 3.X), or + - MySQLdb (Python 2.x) +''' + + # Documentation fragment for managing ProxySQL configuration + MANAGING_CONFIG = r''' +options: + save_to_disk: + description: + - Save config to sqlite db on disk to persist the configuration. + type: bool + default: 'yes' + load_to_runtime: + description: + - Dynamically load config to runtime memory. + type: bool + default: 'yes' +''' diff --git a/plugins/module_utils/mysql.py b/plugins/module_utils/mysql.py new file mode 100644 index 0000000..b5beb02 --- /dev/null +++ b/plugins/module_utils/mysql.py @@ -0,0 +1,110 @@ +# This code is part of Ansible, but is an independent component. +# This particular file snippet, and this file snippet only, is BSD licensed. +# Modules you write using this snippet, which is embedded dynamically by Ansible +# still belong to the author of the module, and may assign their own license +# to the complete work. +# +# Copyright (c), Jonathan Mainguy , 2015 +# Most of this was originally added by Sven Schliesing @muffl0n in the mysql_user.py module +# +# Simplified BSD License (see licenses/simplified_bsd.txt or https://opensource.org/licenses/BSD-2-Clause) + +from __future__ import (absolute_import, division, print_function) +__metaclass__ = type + +import os + +from ansible.module_utils.six.moves import configparser + +try: + import pymysql as mysql_driver + _mysql_cursor_param = 'cursor' +except ImportError: + try: + import MySQLdb as mysql_driver + import MySQLdb.cursors + _mysql_cursor_param = 'cursorclass' + except ImportError: + mysql_driver = None + +mysql_driver_fail_msg = 'The PyMySQL (Python 2.7 and Python 3.X) or MySQL-python (Python 2.X) module is required.' 
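+# Illustrative use only (an assumed typical call, mirroring the modules in
+# this collection): a module first checks that one of the drivers imported
+# successfully and then connects, e.g.
+#
+#     if mysql_driver is None:
+#         module.fail_json(msg=mysql_driver_fail_msg)
+#     cursor, db_conn = mysql_connect(module, login_user, login_password,
+#                                     config_file, cursor_class='DictCursor')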
+
+
+def parse_from_mysql_config_file(cnf):
+    cp = configparser.ConfigParser()
+    cp.read(cnf)
+    return cp
+
+
+def mysql_connect(module, login_user=None, login_password=None, config_file='', ssl_cert=None,
+                  ssl_key=None, ssl_ca=None, db=None, cursor_class=None,
+                  connect_timeout=30, autocommit=False, config_overrides_defaults=False):
+    config = {}
+
+    if config_file and os.path.exists(config_file):
+        config['read_default_file'] = config_file
+        cp = parse_from_mysql_config_file(config_file)
+        # Override some common defaults with values from config file if needed
+        if cp and cp.has_section('client') and config_overrides_defaults:
+            try:
+                module.params['login_host'] = cp.get('client', 'host', fallback=module.params['login_host'])
+                module.params['login_port'] = cp.getint('client', 'port', fallback=module.params['login_port'])
+            except Exception as e:
+                # str(e) rather than e.message: exceptions have no .message
+                # attribute on Python 3
+                if "got an unexpected keyword argument 'fallback'" in str(e):
+                    module.fail_json('To use config_overrides_defaults, '
+                                     'it needs Python 3.5+ as the default interpreter on a target host')
+
+    if ssl_ca is not None or ssl_key is not None or ssl_cert is not None:
+        config['ssl'] = {}
+
+    if module.params['login_unix_socket']:
+        config['unix_socket'] = module.params['login_unix_socket']
+    else:
+        config['host'] = module.params['login_host']
+        config['port'] = module.params['login_port']
+
+    # If login_user or login_password are given, they should override the
+    # config file
+    if login_user is not None:
+        config['user'] = login_user
+    if login_password is not None:
+        config['passwd'] = login_password
+    if ssl_cert is not None:
+        config['ssl']['cert'] = ssl_cert
+    if ssl_key is not None:
+        config['ssl']['key'] = ssl_key
+    if ssl_ca is not None:
+        config['ssl']['ca'] = ssl_ca
+    if db is not None:
+        config['db'] = db
+    if connect_timeout is not None:
+        config['connect_timeout'] = connect_timeout
+
+    if _mysql_cursor_param == 'cursor':
+        # In case of PyMySQL driver:
+        db_connection = mysql_driver.connect(autocommit=autocommit, **config)
+    else:
+        # In case of MySQLdb driver
+        db_connection = mysql_driver.connect(**config)
+        if autocommit:
+            db_connection.autocommit(True)
+
+    if cursor_class == 'DictCursor':
+        return db_connection.cursor(**{_mysql_cursor_param: mysql_driver.cursors.DictCursor}), db_connection
+    else:
+        return db_connection.cursor(), db_connection
+
+
+def mysql_common_argument_spec():
+    return dict(
+        login_user=dict(type='str', default=None),
+        login_password=dict(type='str', no_log=True),
+        login_host=dict(type='str', default='localhost'),
+        login_port=dict(type='int', default=3306),
+        login_unix_socket=dict(type='str'),
+        config_file=dict(type='path', default='~/.my.cnf'),
+        connect_timeout=dict(type='int', default=30),
+        client_cert=dict(type='path', aliases=['ssl_cert']),
+        client_key=dict(type='path', aliases=['ssl_key']),
+        ca_cert=dict(type='path', aliases=['ssl_ca']),
+    )
diff --git a/plugins/modules/proxysql_backend_servers.py b/plugins/modules/proxysql_backend_servers.py
new file mode 100644
index 0000000..968e6c5
--- /dev/null
+++ b/plugins/modules/proxysql_backend_servers.py
@@ -0,0 +1,518 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxysql_backend_servers
+author: "Ben Mildren (@bmildren)"
+short_description: Adds or removes mysql hosts from proxysql admin interface.
+description:
+  - The M(community.proxysql.proxysql_backend_servers) module adds or removes mysql hosts using
+    the proxysql admin interface.
+options:
+  hostgroup_id:
+    description:
+      - The hostgroup in which this mysqld instance is included. An instance
+        can be part of one or more hostgroups.
+    type: int
+    default: 0
+  hostname:
+    description:
+      - The ip address at which the mysqld instance can be contacted.
+    type: str
+    required: True
+  port:
+    description:
+      - The port at which the mysqld instance can be contacted.
+    type: int
+    default: 3306
+  status:
+    description:
+      - ONLINE - Backend server is fully operational.
+        OFFLINE_SOFT - When a server is put into C(OFFLINE_SOFT) mode,
+                       connections are kept in use until the current
+                       transaction is completed. This allows a backend to be
+                       gracefully detached.
+        OFFLINE_HARD - When a server is put into C(OFFLINE_HARD) mode, the
+                       existing connections are dropped, while new incoming
+                       connections aren't accepted either.
+
+        If omitted the proxysql database default for I(status) is C(ONLINE).
+    type: str
+    choices: [ "ONLINE", "OFFLINE_SOFT", "OFFLINE_HARD"]
+  weight:
+    description:
+      - The bigger the weight of a server relative to other weights, the higher
+        the probability of the server being chosen from the hostgroup. If
+        omitted the proxysql database default for I(weight) is 1.
+    type: int
+  compression:
+    description:
+      - If the value of I(compression) is greater than 0, new connections to
+        that server will use compression. If omitted the proxysql database
+        default for I(compression) is 0.
+    type: int
+  max_connections:
+    description:
+      - The maximum number of connections ProxySQL will open to this backend
+        server. If omitted the proxysql database default for I(max_connections)
+        is 1000.
+    type: int
+  max_replication_lag:
+    description:
+      - If greater than 0, ProxySQL will regularly monitor replication lag. If
+        replication lag goes above I(max_replication_lag), proxysql will
+        temporarily shun the server until replication catches up. If omitted
+        the proxysql database default for I(max_replication_lag) is 0.
+    type: int
+  use_ssl:
+    description:
+      - If I(use_ssl) is set to C(True), connections to this server will be
+        made using SSL connections. If omitted the proxysql database default
+        for I(use_ssl) is C(False).
+    type: bool
+  max_latency_ms:
+    description:
+      - Ping time is monitored regularly. If a host has a ping time greater
+        than I(max_latency_ms) it is excluded from the connection pool
+        (although the server stays ONLINE). If omitted the proxysql database
+        default for I(max_latency_ms) is 0.
+    type: int
+  comment:
+    description:
+      - Text field that can be used for any purpose defined by the user.
+        Could be a description of what the host stores, a reminder of when the
+        host was added or disabled, or a JSON processed by some checker script.
+    type: str
+    default: ''
+  state:
+    description:
+      - When C(present) - adds the host, when C(absent) - removes the host.
+    type: str
+    choices: [ "present", "absent" ]
+    default: present
+extends_documentation_fragment:
+- community.proxysql.proxysql.managing_config
+- community.proxysql.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example adds a server. It saves the mysql server config to disk, but
+# avoids loading the mysql server config to runtime (this might be because
+# several servers are being added and the user wants to push the config to
+# runtime in a single batch using the community.proxysql.proxysql_manage_config
+# module). It uses supplied credentials to connect to the proxysql admin
+# interface.
+
+- name: Add a server
+  proxysql_backend_servers:
+    login_user: 'admin'
+    login_password: 'admin'
+    hostname: 'mysql01'
+    state: present
+    load_to_runtime: False
+
+# This example removes a server, saves the mysql server config to disk, and
+# dynamically loads the mysql server config to runtime. It uses credentials
+# in a supplied config file to connect to the proxysql admin interface.
+
+- name: Remove a server
+  proxysql_backend_servers:
+    config_file: '~/proxysql.cnf'
+    hostname: 'mysql02'
+    state: absent
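+
+# This example (an illustrative sketch; the hostname and credentials are
+# placeholders) gracefully detaches a backend by setting it OFFLINE_SOFT,
+# which keeps existing connections in use until their current transaction
+# completes.
+
+- name: Gracefully detach a server
+  proxysql_backend_servers:
+    login_user: 'admin'
+    login_password: 'admin'
+    hostname: 'mysql01'
+    status: OFFLINE_SOFT
+    state: present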
+'''
+
+RETURN = '''
+stdout:
+    description: The mysql host modified or removed from proxysql
+    returned: On create/update will return the newly modified host; on delete
+              it will return the deleted record.
+    type: dict
+    "sample": {
+        "changed": true,
+        "hostname": "192.168.52.1",
+        "msg": "Added server to mysql_hosts",
+        "server": {
+            "comment": "",
+            "compression": "0",
+            "hostgroup_id": "1",
+            "hostname": "192.168.52.1",
+            "max_connections": "1000",
+            "max_latency_ms": "0",
+            "max_replication_lag": "0",
+            "port": "3306",
+            "status": "ONLINE",
+            "use_ssl": "0",
+            "weight": "1"
+        },
+        "state": "present"
+    }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["port"] < 0 \
+            or module.params["port"] > 65535:
+        module.fail_json(
+            msg="port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["compression"]:
+        if module.params["compression"] < 0 \
+                or module.params["compression"] > 102400:
+            module.fail_json(
+                msg="compression must be set between 0 and 102400"
+            )
+
+    if module.params["max_replication_lag"]:
+        if module.params["max_replication_lag"] < 0 \
+                or module.params["max_replication_lag"] > 126144000:
+            module.fail_json(
+                msg="max_replication_lag must be set between 0 and 126144000"
+            )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(cursor):
+    cursor.execute("SAVE MYSQL SERVERS TO DISK")
+    return True
+
+
+def load_config_to_runtime(cursor):
+    cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
+    return True
+
+
+class ProxySQLServer(object):
+
+    def __init__(self, module):
+        self.state = module.params["state"]
+        self.save_to_disk = module.params["save_to_disk"]
+        self.load_to_runtime = module.params["load_to_runtime"]
+
+        self.hostgroup_id = module.params["hostgroup_id"]
+        self.hostname = module.params["hostname"]
+        self.port = module.params["port"]
+
+        config_data_keys = ["status",
+                            "weight",
+                            "compression",
+                            "max_connections",
+                            "max_replication_lag",
+                            "use_ssl",
+                            "max_latency_ms",
+                            "comment"]
+
+        self.config_data = dict((k, module.params[k])
+                                for k in config_data_keys)
+
+    def check_server_config_exists(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `host_count`
+               FROM mysql_servers
+               WHERE hostgroup_id = %s
+                 AND hostname = %s
+                 AND port = %s"""
+
+        query_data = \
+            [self.hostgroup_id,
+             self.hostname,
+             self.port]
+
+        cursor.execute(query_string, query_data)
+
+
check_count = cursor.fetchone() + return (int(check_count['host_count']) > 0) + + def check_server_config(self, cursor): + query_string = \ + """SELECT count(*) AS `host_count` + FROM mysql_servers + WHERE hostgroup_id = %s + AND hostname = %s + AND port = %s""" + + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + for col, val in iteritems(self.config_data): + if val is not None: + query_data.append(val) + query_string += "\n AND " + col + " = %s" + + cursor.execute(query_string, query_data) + check_count = cursor.fetchone() + + if isinstance(check_count, tuple): + return int(check_count[0]) > 0 + + return (int(check_count['host_count']) > 0) + + def get_server_config(self, cursor): + query_string = \ + """SELECT * + FROM mysql_servers + WHERE hostgroup_id = %s + AND hostname = %s + AND port = %s""" + + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + cursor.execute(query_string, query_data) + server = cursor.fetchone() + return server + + def create_server_config(self, cursor): + query_string = \ + """INSERT INTO mysql_servers ( + hostgroup_id, + hostname, + port""" + + cols = 3 + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + query_string += ",\n" + col + + query_string += \ + (")\n" + + "VALUES (" + + "%s ," * cols) + + query_string = query_string[:-2] + query_string += ")" + + cursor.execute(query_string, query_data) + return True + + def update_server_config(self, cursor): + query_string = """UPDATE mysql_servers""" + + cols = 0 + query_data = [] + + for col, val in iteritems(self.config_data): + if val is not None: + cols += 1 + query_data.append(val) + if cols == 1: + query_string += "\nSET " + col + "= %s," + else: + query_string += "\n " + col + " = %s," + + query_string = query_string[:-1] + query_string += ("\nWHERE hostgroup_id = %s\n AND hostname = %s" + + "\n AND port = %s") + + query_data.append(self.hostgroup_id) + query_data.append(self.hostname) + query_data.append(self.port) + + cursor.execute(query_string, query_data) + return True + + def delete_server_config(self, cursor): + query_string = \ + """DELETE FROM mysql_servers + WHERE hostgroup_id = %s + AND hostname = %s + AND port = %s""" + + query_data = \ + [self.hostgroup_id, + self.hostname, + self.port] + + cursor.execute(query_string, query_data) + return True + + def manage_config(self, cursor, state): + if state: + if self.save_to_disk: + save_config_to_disk(cursor) + if self.load_to_runtime: + load_config_to_runtime(cursor) + + def create_server(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.create_server_config(cursor) + result['msg'] = "Added server to mysql_hosts" + result['server'] = \ + self.get_server_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Server would have been added to" + + " mysql_hosts, however check_mode" + + " is enabled.") + + def update_server(self, check_mode, result, cursor): + if not check_mode: + result['changed'] = \ + self.update_server_config(cursor) + result['msg'] = "Updated server in mysql_hosts" + result['server'] = \ + self.get_server_config(cursor) + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Server would have been updated in" + + " mysql_hosts, however check_mode" + + " is enabled.") + + def delete_server(self, check_mode, result, 
cursor): + if not check_mode: + result['server'] = \ + self.get_server_config(cursor) + result['changed'] = \ + self.delete_server_config(cursor) + result['msg'] = "Deleted server from mysql_hosts" + self.manage_config(cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Server would have been deleted from" + + " mysql_hosts, however check_mode is" + + " enabled.") + +# =========================================== +# Module execution. +# + + +def main(): + module = AnsibleModule( + argument_spec=dict( + login_user=dict(default=None, type='str'), + login_password=dict(default=None, no_log=True, type='str'), + login_host=dict(default='127.0.0.1'), + login_unix_socket=dict(default=None), + login_port=dict(default=6032, type='int'), + config_file=dict(default='', type='path'), + hostgroup_id=dict(default=0, type='int'), + hostname=dict(required=True, type='str'), + port=dict(default=3306, type='int'), + status=dict(choices=['ONLINE', + 'OFFLINE_SOFT', + 'OFFLINE_HARD']), + weight=dict(type='int'), + compression=dict(type='int'), + max_connections=dict(type='int'), + max_replication_lag=dict(type='int'), + use_ssl=dict(type='bool'), + max_latency_ms=dict(type='int'), + comment=dict(default='', type='str'), + state=dict(default='present', choices=['present', + 'absent']), + save_to_disk=dict(default=True, type='bool'), + load_to_runtime=dict(default=True, type='bool') + ), + supports_check_mode=True + ) + + perform_checks(module) + + login_user = module.params["login_user"] + login_password = module.params["login_password"] + config_file = module.params["config_file"] + + cursor = None + try: + cursor, db_conn = mysql_connect(module, + login_user, + login_password, + config_file, + cursor_class='DictCursor') + except mysql_driver.Error as e: + module.fail_json( + msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e) + ) + + proxysql_server = ProxySQLServer(module) + result = {} + + result['state'] = proxysql_server.state + if proxysql_server.hostname: + result['hostname'] = proxysql_server.hostname + + if proxysql_server.state == "present": + try: + if not proxysql_server.check_server_config(cursor): + if not proxysql_server.check_server_config_exists(cursor): + proxysql_server.create_server(module.check_mode, + result, + cursor) + else: + proxysql_server.update_server(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The server already exists in mysql_hosts" + + " and doesn't need to be updated.") + result['server'] = \ + proxysql_server.get_server_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify server.. %s" % to_native(e) + ) + + elif proxysql_server.state == "absent": + try: + if proxysql_server.check_server_config_exists(cursor): + proxysql_server.delete_server(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The server is already absent from the" + + " mysql_hosts memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove server.. 
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxysql_global_variables.py b/plugins/modules/proxysql_global_variables.py new file mode 100644 index 0000000..e857714 --- /dev/null +++ b/plugins/modules/proxysql_global_variables.py @@ -0,0 +1,269 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_global_variables +author: "Ben Mildren (@bmildren)" +short_description: Gets or sets the proxysql global variables. +description: + - The M(community.general.proxysql_global_variables) module gets or sets the proxysql global + variables. +options: + variable: + description: + - Defines which variable should be returned, or if I(value) is specified + which variable should be updated. + type: str + required: True + value: + description: + - Defines a value the variable specified using I(variable) should be set + to. + type: str +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example sets the value of a variable, saves the mysql admin variables +# config to disk, and dynamically loads the mysql admin variables config to +# runtime. It uses supplied credentials to connect to the proxysql admin +# interface. + +- name: Set the value of a variable + proxysql_global_variables: + login_user: 'admin' + login_password: 'admin' + variable: 'mysql-max_connections' + value: 4096 + +# This example gets the value of a variable. It uses credentials in a +# supplied config file to connect to the proxysql admin interface. + +- name: Get the value of a variable + proxysql_global_variables: + config_file: '~/proxysql.cnf' + variable: 'mysql-default_query_delay' +''' + +RETURN = ''' +stdout: + description: Returns the mysql variable supplied with it's associated value. + returned: Returns the current variable and value, or the newly set value + for the variable supplied.. + type: dict + "sample": { + "changed": false, + "msg": "The variable is already been set to the supplied value", + "var": { + "variable_name": "mysql-poll_timeout", + "variable_value": "3000" + } + } +''' + +from ansible.module_utils.basic import AnsibleModule +from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg +from ansible.module_utils._text import to_native + +# =========================================== +# proxysql module specific support methods. 
+'''
+
+RETURN = '''
+stdout:
+    description: Returns the mysql variable supplied with its associated value.
+    returned: Returns the current variable and value, or the newly set value
+              for the variable supplied.
+    type: dict
+    "sample": {
+        "changed": false,
+        "msg": "The variable has already been set to the supplied value",
+        "var": {
+            "variable_name": "mysql-poll_timeout",
+            "variable_value": "3000"
+        }
+    }
+'''

+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(variable, cursor):
+    if variable.startswith("admin"):
+        cursor.execute("SAVE ADMIN VARIABLES TO DISK")
+    else:
+        cursor.execute("SAVE MYSQL VARIABLES TO DISK")
+    return True
+
+
+def load_config_to_runtime(variable, cursor):
+    if variable.startswith("admin"):
+        cursor.execute("LOAD ADMIN VARIABLES TO RUNTIME")
+    else:
+        cursor.execute("LOAD MYSQL VARIABLES TO RUNTIME")
+    return True
+
+
+def check_config(variable, value, cursor):
+    query_string = \
+        """SELECT count(*) AS `variable_count`
+           FROM global_variables
+           WHERE variable_name = %s and variable_value = %s"""
+
+    query_data = \
+        [variable, value]
+
+    cursor.execute(query_string, query_data)
+    check_count = cursor.fetchone()
+
+    if isinstance(check_count, tuple):
+        return int(check_count[0]) > 0
+
+    return (int(check_count['variable_count']) > 0)
+
+
+def get_config(variable, cursor):
+
+    query_string = \
+        """SELECT *
+           FROM global_variables
+           WHERE variable_name = %s"""
+
+    query_data = \
+        [variable, ]
+
+    cursor.execute(query_string, query_data)
+    row_count = cursor.rowcount
+    resultset = cursor.fetchone()
+
+    if row_count > 0:
+        return resultset
+    else:
+        return False
+
+
+def set_config(variable, value, cursor):
+
+    query_string = \
+        """UPDATE global_variables
+           SET variable_value = %s
+           WHERE variable_name = %s"""
+
+    query_data = \
+        [value, variable]
+
+    cursor.execute(query_string, query_data)
+    return True
+
+
+def manage_config(variable, save_to_disk, load_to_runtime, cursor, state):
+    if state:
+        if save_to_disk:
+            save_config_to_disk(variable, cursor)
+        if load_to_runtime:
+            load_config_to_runtime(variable, cursor)
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_user=dict(default=None, type='str'),
+            login_password=dict(default=None, no_log=True, type='str'),
+            login_host=dict(default="127.0.0.1"),
+            login_unix_socket=dict(default=None),
+            login_port=dict(default=6032, type='int'),
+            config_file=dict(default="", type='path'),
+            variable=dict(required=True, type='str'),
+            value=dict(),
+            save_to_disk=dict(default=True, type='bool'),
+            load_to_runtime=dict(default=True, type='bool')
+        ),
+        supports_check_mode=True
+    )
+
+    perform_checks(module)
+
+    login_user = module.params["login_user"]
+    login_password = module.params["login_password"]
+    config_file = module.params["config_file"]
+    variable = module.params["variable"]
+    value = module.params["value"]
+    save_to_disk = module.params["save_to_disk"]
+    load_to_runtime = module.params["load_to_runtime"]
+
+    cursor = None
+    try:
+        cursor, db_conn = mysql_connect(module,
+                                        login_user,
+                                        login_password,
+                                        config_file,
+                                        cursor_class='DictCursor')
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
+        )
+
+    result = {}
+
+    if not value:
+        try:
+            if get_config(variable, cursor):
+                result['changed'] = False
+                result['msg'] = \
+                    "Returned the variable and its current value"
+                result['var'] = get_config(variable, cursor)
+            else:
+                module.fail_json(
+                    msg="The variable \"%s\" was not found" % variable
+                )
+
+        except mysql_driver.Error as e:
+            module.fail_json(
+                msg="unable to get config.. %s" % to_native(e)
+            )
%s" % to_native(e) + ) + else: + try: + if get_config(variable, cursor): + if not check_config(variable, value, cursor): + if not module.check_mode: + result['changed'] = set_config(variable, value, cursor) + result['msg'] = \ + "Set the variable to the supplied value" + result['var'] = get_config(variable, cursor) + manage_config(variable, + save_to_disk, + load_to_runtime, + cursor, + result['changed']) + else: + result['changed'] = True + result['msg'] = ("Variable would have been set to" + + " the supplied value, however" + + " check_mode is enabled.") + else: + result['changed'] = False + result['msg'] = ("The variable is already been set to" + + " the supplied value") + result['var'] = get_config(variable, cursor) + else: + module.fail_json( + msg="The variable \"%s\" was not found" % variable + ) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to set config.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxysql_manage_config.py b/plugins/modules/proxysql_manage_config.py new file mode 100644 index 0000000..6274090 --- /dev/null +++ b/plugins/modules/proxysql_manage_config.py @@ -0,0 +1,218 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_manage_config + +author: "Ben Mildren (@bmildren)" +short_description: Writes the proxysql configuration settings between layers. +description: + - The M(community.general.proxysql_global_variables) module writes the proxysql configuration + settings between layers. Currently this module will always report a + changed state, so should typically be used with WHEN however this will + change in a future version when the CHECKSUM table commands are available + for all tables in proxysql. +options: + action: + description: + - The supplied I(action) combines with the supplied I(direction) to + provide the semantics of how we want to move the I(config_settings) + between the I(config_layers). + type: str + choices: [ "LOAD", "SAVE" ] + required: True + config_settings: + description: + - The I(config_settings) specifies which configuration we're writing. + type: str + choices: [ "MYSQL USERS", "MYSQL SERVERS", "MYSQL QUERY RULES", + "MYSQL VARIABLES", "ADMIN VARIABLES", "SCHEDULER" ] + required: True + direction: + description: + - FROM - denotes we're reading values FROM the supplied I(config_layer) + and writing to the next layer. + TO - denotes we're reading from the previous layer and writing TO the + supplied I(config_layer)." + type: str + choices: [ "FROM", "TO" ] + required: True + config_layer: + description: + - RUNTIME - represents the in-memory data structures of ProxySQL used by + the threads that are handling the requests. + MEMORY - (sometimes also referred as main) represents the in-memory + SQLite3 database. + DISK - represents the on-disk SQLite3 database. + CONFIG - is the classical config file. You can only LOAD FROM the + config file. + type: str + choices: [ "MEMORY", "DISK", "RUNTIME", "CONFIG" ] + required: True +extends_documentation_fragment: +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example saves the mysql users config from memory to disk. It uses +# supplied credentials to connect to the proxysql admin interface. 
+
+- name: Save the mysql users config from memory to disk
+  proxysql_manage_config:
+    login_user: 'admin'
+    login_password: 'admin'
+    action: "SAVE"
+    config_settings: "MYSQL USERS"
+    direction: "FROM"
+    config_layer: "MEMORY"
+
+# This example loads the mysql query rules config from memory to runtime. It
+# uses supplied credentials to connect to the proxysql admin interface.
+
+- name: Load the mysql query rules config from memory to runtime
+  proxysql_manage_config:
+    config_file: '~/proxysql.cnf'
+    action: "LOAD"
+    config_settings: "MYSQL QUERY RULES"
+    direction: "TO"
+    config_layer: "RUNTIME"
+'''
+
+RETURN = '''
+stdout:
+    description: Simply reports whether the action reported a change.
+    returned: Currently the returned value will always be changed=True.
+    type: dict
+    "sample": {
+        "changed": true
+    }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["config_layer"] == 'CONFIG' and \
+            (module.params["action"] != 'LOAD' or
+             module.params["direction"] != 'FROM'):
+
+        if (module.params["action"] != 'LOAD' and
+                module.params["direction"] != 'FROM'):
+            msg_string = ("Neither the action \"%s\" nor the direction" +
+                          " \"%s\" is a valid combination with the CONFIG" +
+                          " config_layer")
+            module.fail_json(msg=msg_string % (module.params["action"],
+                                               module.params["direction"]))
+
+        elif module.params["action"] != 'LOAD':
+            msg_string = ("The action \"%s\" is not a valid combination" +
+                          " with the CONFIG config_layer")
+            module.fail_json(msg=msg_string % module.params["action"])
+
+        else:
+            msg_string = ("The direction \"%s\" is not a valid combination" +
+                          " with the CONFIG config_layer")
+            module.fail_json(msg=msg_string % module.params["direction"])
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def manage_config(manage_config_settings, cursor):
+
+    query_string = "%s" % ' '.join(manage_config_settings)
+
+    cursor.execute(query_string)
+    return True
+
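+# Editor's illustration (hypothetical, not module code): the four choice
+# parameters are joined verbatim into a single ProxySQL admin statement.
+# EchoCursor is a made-up stand-in that just prints what would be executed.
+class EchoCursor(object):
+    def execute(self, query):
+        print(query)
+
+
+manage_config(["LOAD", "MYSQL QUERY RULES", "TO", "RUNTIME"], EchoCursor())
+# prints: LOAD MYSQL QUERY RULES TO RUNTIME
+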
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_user=dict(default=None, type='str'),
+            login_password=dict(default=None, no_log=True, type='str'),
+            login_host=dict(default="127.0.0.1"),
+            login_unix_socket=dict(default=None),
+            login_port=dict(default=6032, type='int'),
+            config_file=dict(default="", type='path'),
+            action=dict(required=True, choices=['LOAD',
+                                                'SAVE']),
+            config_settings=dict(required=True, choices=['MYSQL USERS',
+                                                         'MYSQL SERVERS',
+                                                         'MYSQL QUERY RULES',
+                                                         'MYSQL VARIABLES',
+                                                         'ADMIN VARIABLES',
+                                                         'SCHEDULER']),
+            direction=dict(required=True, choices=['FROM',
+                                                   'TO']),
+            config_layer=dict(required=True, choices=['MEMORY',
+                                                      'DISK',
+                                                      'RUNTIME',
+                                                      'CONFIG'])
+        ),
+        supports_check_mode=True
+    )
+
+    perform_checks(module)
+
+    login_user = module.params["login_user"]
+    login_password = module.params["login_password"]
+    config_file = module.params["config_file"]
+    action = module.params["action"]
+    config_settings = module.params["config_settings"]
+    direction = module.params["direction"]
+    config_layer = module.params["config_layer"]
+
+    cursor = None
+    try:
+        cursor, db_conn = mysql_connect(module,
+                                        login_user,
+                                        login_password,
+                                        config_file)
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
+        )
+
+    result = {}
+
+    manage_config_settings = \
+        [action, config_settings, direction, config_layer]
+
+    try:
+        result['changed'] = manage_config(manage_config_settings,
+                                          cursor)
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to manage config.. %s" % to_native(e)
+        )
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
diff --git a/plugins/modules/proxysql_mysql_users.py b/plugins/modules/proxysql_mysql_users.py
new file mode 100644
index 0000000..9812384
--- /dev/null
+++ b/plugins/modules/proxysql_mysql_users.py
@@ -0,0 +1,481 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# Copyright: (c) 2017, Ansible Project
+# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
+
+from __future__ import absolute_import, division, print_function
+__metaclass__ = type
+
+DOCUMENTATION = '''
+---
+module: proxysql_mysql_users
+author: "Ben Mildren (@bmildren)"
+short_description: Adds or removes mysql users from proxysql admin interface.
+description:
+   - The M(community.general.proxysql_mysql_users) module adds or removes
+     mysql users using the proxysql admin interface.
+options:
+  username:
+    description:
+      - Name of the user connecting to the mysqld or ProxySQL instance.
+    type: str
+    required: True
+  password:
+    description:
+      - Password of the user connecting to the mysqld or ProxySQL instance.
+    type: str
+  active:
+    description:
+      - A user with I(active) set to C(False) will be tracked in the
+        database, but will never be loaded in the in-memory data structures.
+        If omitted the proxysql database default for I(active) is C(True).
+    type: bool
+  use_ssl:
+    description:
+      - If I(use_ssl) is set to C(True), connections by this user will be
+        made using SSL connections. If omitted the proxysql database default
+        for I(use_ssl) is C(False).
+    type: bool
+  default_hostgroup:
+    description:
+      - If there is no matching rule for the queries sent by this user, the
+        traffic it generates is sent to the specified hostgroup.
+        If omitted the proxysql database default for I(default_hostgroup)
+        is 0.
+    type: int
+  default_schema:
+    description:
+      - The schema to which the connection should change by default.
+    type: str
+  transaction_persistent:
+    description:
+      - If this is set for the user with which the MySQL client is connecting
+        to ProxySQL (thus a "frontend" user), transactions started within a
+        hostgroup will remain within that hostgroup regardless of any other
+        rules.
+        If omitted the proxysql database default for I(transaction_persistent)
+        is C(False).
+    type: bool
+  fast_forward:
+    description:
+      - If I(fast_forward) is set to C(True), I(fast_forward) will bypass the
+        query processing layer (rewriting, caching) and pass through the
+        query directly as is to the backend server. If omitted the proxysql
+        database default for I(fast_forward) is C(False).
+    type: bool
+  backend:
+    description:
+      - If I(backend) is set to C(True), this (username, password) pair is
+        used for authenticating to the ProxySQL instance.
+    default: True
+    type: bool
+  frontend:
+    description:
+      - If I(frontend) is set to C(True), this (username, password) pair is
+        used for authenticating to the mysqld servers against any hostgroup.
+    default: True
+    type: bool
+  max_connections:
+    description:
+      - The maximum number of connections ProxySQL will open to the backend
+        for this user. If omitted the proxysql database default for
+        I(max_connections) is 10000.
+    type: int
+  state:
+    description:
+      - When C(present) - adds the user, when C(absent) - removes the user.
+    type: str
+    choices: [ "present", "absent" ]
+    default: present
+extends_documentation_fragment:
+- community.proxysql.proxysql.managing_config
+- community.proxysql.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example adds a user, it saves the mysql user config to disk, but
+# avoids loading the mysql user config to runtime (this might be because
+# several users are being added and the user wants to push the config to
+# runtime in a single batch using the community.general.proxysql_manage_config
+# module). It uses supplied credentials to connect to the proxysql admin
+# interface.
+
+- name: Add a user
+  proxysql_mysql_users:
+    login_user: 'admin'
+    login_password: 'admin'
+    username: 'productiondba'
+    state: present
+    load_to_runtime: False
+
+# This example removes a user, saves the mysql user config to disk, and
+# dynamically loads the mysql user config to runtime. It uses credentials
+# in a supplied config file to connect to the proxysql admin interface.
+
+- name: Remove a user
+  proxysql_mysql_users:
+    config_file: '~/proxysql.cnf'
+    username: 'mysqlboy'
+    state: absent
+'''
+
+RETURN = '''
+stdout:
+    description: The mysql user modified or removed from proxysql
+    returned: On create/update will return the newly modified user, on delete
+              it will return the deleted record.
+    type: dict
+    sample:
+        changed: true
+        msg: Added user to mysql_users
+        state: present
+        user:
+            active: 1
+            backend: 1
+            default_hostgroup: 1
+            default_schema: null
+            fast_forward: 0
+            frontend: 1
+            max_connections: 10000
+            password: VALUE_SPECIFIED_IN_NO_LOG_PARAMETER
+            schema_locked: 0
+            transaction_persistent: 0
+            use_ssl: 0
+            username: guest_ro
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(cursor):
+    cursor.execute("SAVE MYSQL USERS TO DISK")
+    return True
+
+
+def load_config_to_runtime(cursor):
+    cursor.execute("LOAD MYSQL USERS TO RUNTIME")
+    return True
+
+
+class ProxySQLUser(object):
+
+    def __init__(self, module):
+        self.state = module.params["state"]
+        self.save_to_disk = module.params["save_to_disk"]
+        self.load_to_runtime = module.params["load_to_runtime"]
+
+        self.username = module.params["username"]
+        self.backend = module.params["backend"]
+        self.frontend = module.params["frontend"]
+
+        config_data_keys = ["password",
+                            "active",
+                            "use_ssl",
+                            "default_hostgroup",
+                            "default_schema",
+                            "transaction_persistent",
+                            "fast_forward",
+                            "max_connections"]
+
+        self.config_data = dict((k, module.params[k])
+                                for k in config_data_keys)
+
+    def check_user_config_exists(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `user_count`
+               FROM mysql_users
+               WHERE username = %s
+                 AND backend = %s
+                 AND frontend = %s"""
+
+        query_data = \
+            [self.username,
+             self.backend,
+             self.frontend]
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+        return (int(check_count['user_count']) > 0)
+
+    def check_user_privs(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `user_count`
+               FROM mysql_users
+               WHERE username = %s
+                 AND backend = %s
+                 AND frontend = %s"""
+
+        query_data = \
+            [self.username,
+             self.backend,
+             self.frontend]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                query_data.append(val)
+                query_string += "\n  AND " + col + " = %s"
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+        return (int(check_count['user_count']) > 0)
+
+    def get_user_config(self, cursor):
+        query_string = \
+            """SELECT *
+               FROM mysql_users
+               WHERE username = %s
+                 AND backend = %s
+                 AND frontend = %s"""
+
+        query_data = \
+            [self.username,
+             self.backend,
+             self.frontend]
+
+        cursor.execute(query_string, query_data)
+        user = cursor.fetchone()
+        return user
+
+    def create_user_config(self, cursor):
+        query_string = \
+            """INSERT INTO mysql_users (
+               username,
+               backend,
+               frontend"""
+
+        cols = 3
+        query_data = \
+            [self.username,
+             self.backend,
+             self.frontend]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                cols += 1
+                query_data.append(val)
+                query_string += ",\n" + col
+
+        query_string += \
+            (")\n" +
+             "VALUES (" +
+             "%s ," * cols)
+
+        query_string = query_string[:-2]
+        query_string += ")"
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def update_user_config(self, cursor):
+        query_string = """UPDATE mysql_users"""
+
+        cols = 0
+        query_data = []
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                cols += 1
+                query_data.append(val)
+                if cols == 1:
+                    query_string += "\nSET " + col + "= %s,"
+                else:
+                    query_string += "\n    " + col + " = %s,"
+
+        query_string = query_string[:-1]
+        query_string += ("\nWHERE username = %s\n  AND backend = %s" +
+                         "\n  AND frontend = %s")
+
+        query_data.append(self.username)
+        query_data.append(self.backend)
+        query_data.append(self.frontend)
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def delete_user_config(self, cursor):
+        query_string = \
+            """DELETE FROM mysql_users
+               WHERE username = %s
+                 AND backend = %s
+                 AND frontend = %s"""
+
+        query_data = \
+            [self.username,
+             self.backend,
+             self.frontend]
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def manage_config(self, cursor, state):
+        if state:
+            if self.save_to_disk:
+                save_config_to_disk(cursor)
+            if self.load_to_runtime:
+                load_config_to_runtime(cursor)
+
+    def create_user(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'] = \
+                self.create_user_config(cursor)
+            result['msg'] = "Added user to mysql_users"
+            result['user'] = \
+                self.get_user_config(cursor)
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("User would have been added to" +
+                             " mysql_users, however check_mode" +
+                             " is enabled.")
+
+    def update_user(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'] = \
+                self.update_user_config(cursor)
+            result['msg'] = "Updated user in mysql_users"
+            result['user'] = \
+                self.get_user_config(cursor)
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("User would have been updated in" +
+                             " mysql_users, however check_mode" +
+                             " is enabled.")
+
+    def delete_user(self, check_mode, result, cursor):
+        if not check_mode:
+            result['user'] = \
+                self.get_user_config(cursor)
+            result['changed'] = \
+                self.delete_user_config(cursor)
+            result['msg'] = "Deleted user from mysql_users"
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("User would have been deleted from" +
+                             " mysql_users, however check_mode is" +
+                             " enabled.")
+
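+# Editor's illustration (hypothetical values, not module code) of the
+# dynamic-SQL pattern ProxySQLUser uses above: parameters left as None are
+# simply omitted, so the generated INSERT only names the columns that were
+# actually supplied.
+config_data = {"password": "s3cret", "active": True, "max_connections": None}
+cols = ["username", "backend", "frontend"]
+for col, val in config_data.items():
+    if val is not None:
+        cols.append(col)
+query = ("INSERT INTO mysql_users (" + ", ".join(cols) + ")" +
+         " VALUES (" + ", ".join(["%s"] * len(cols)) + ")")
+# -> INSERT INTO mysql_users (username, backend, frontend, password, active)
+#    VALUES (%s, %s, %s, %s, %s)
+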
%s" % to_native(e) + ) + + proxysql_user = ProxySQLUser(module) + result = {} + + result['state'] = proxysql_user.state + if proxysql_user.username: + result['username'] = proxysql_user.username + + if proxysql_user.state == "present": + try: + if not proxysql_user.check_user_privs(cursor): + if not proxysql_user.check_user_config_exists(cursor): + proxysql_user.create_user(module.check_mode, + result, + cursor) + else: + proxysql_user.update_user(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The user already exists in mysql_users" + + " and doesn't need to be updated.") + result['user'] = \ + proxysql_user.get_user_config(cursor) + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify user.. %s" % to_native(e) + ) + + elif proxysql_user.state == "absent": + try: + if proxysql_user.check_user_config_exists(cursor): + proxysql_user.delete_user(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The user is already absent from the" + + " mysql_users memory configuration") + except mysql_driver.Error as e: + module.fail_json( + msg="unable to remove user.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxysql_query_rules.py b/plugins/modules/proxysql_query_rules.py new file mode 100644 index 0000000..65649a9 --- /dev/null +++ b/plugins/modules/proxysql_query_rules.py @@ -0,0 +1,632 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_query_rules +author: "Ben Mildren (@bmildren)" +short_description: Modifies query rules using the proxysql admin interface. +description: + - The M(community.general.proxysql_query_rules) module modifies query rules using the + proxysql admin interface. +options: + rule_id: + description: + - The unique id of the rule. Rules are processed in rule_id order. + type: int + active: + description: + - A rule with I(active) set to C(False) will be tracked in the database, + but will be never loaded in the in-memory data structures. + type: bool + username: + description: + - Filtering criteria matching username. If I(username) is non-NULL, a + query will match only if the connection is made with the correct + username. + type: str + schemaname: + description: + - Filtering criteria matching schemaname. If I(schemaname) is non-NULL, a + query will match only if the connection uses schemaname as its default + schema. + type: str + flagIN: + description: + - Used in combination with I(flagOUT) and I(apply) to create chains of + rules. + type: int + client_addr: + description: + - Match traffic from a specific source. + type: str + proxy_addr: + description: + - Match incoming traffic on a specific local IP. + type: str + proxy_port: + description: + - Match incoming traffic on a specific local port. + type: int + digest: + description: + - Match queries with a specific digest, as returned by + stats_mysql_query_digest.digest. + type: str + match_digest: + description: + - Regular expression that matches the query digest. The dialect of + regular expressions used is that of re2 - https://github.com/google/re2 + type: str + match_pattern: + description: + - Regular expression that matches the query text. 
+  match_pattern:
+    description:
+      - Regular expression that matches the query text. The dialect of
+        regular expressions used is that of re2 - https://github.com/google/re2
+    type: str
+  negate_match_pattern:
+    description:
+      - If I(negate_match_pattern) is set to C(True), only queries not
+        matching the query text will be considered as a match. This acts as
+        a NOT operator in front of the regular expression matching against
+        match_pattern.
+    type: bool
+  flagOUT:
+    description:
+      - Used in combination with I(flagIN) and I(apply) to create chains of
+        rules. When set, I(flagOUT) signifies the I(flagIN) to be used in the
+        next chain of rules.
+    type: int
+  replace_pattern:
+    description:
+      - This is the pattern with which to replace the matched pattern. Note
+        that this is optional, and when omitted, the query processor will
+        only cache, route, or set other parameters without rewriting.
+    type: str
+  destination_hostgroup:
+    description:
+      - Route matched queries to this hostgroup. This happens unless there is
+        a started transaction and the logged in user has
+        I(transaction_persistent) set to C(True) (see
+        M(community.general.proxysql_mysql_users)).
+    type: int
+  cache_ttl:
+    description:
+      - The number of milliseconds for which to cache the result of the
+        query. Note in ProxySQL 1.1 I(cache_ttl) was in seconds.
+    type: int
+  timeout:
+    description:
+      - The maximum timeout in milliseconds with which the matched or
+        rewritten query should be executed. If a query runs for longer than
+        the specified threshold, the query is automatically killed. If
+        timeout is not specified, the global variable
+        mysql-default_query_timeout applies.
+    type: int
+  retries:
+    description:
+      - The maximum number of times a query needs to be re-executed in case
+        of detected failure during the execution of the query. If retries is
+        not specified, the global variable mysql-query_retries_on_failure
+        applies.
+    type: int
+  delay:
+    description:
+      - Number of milliseconds to delay the execution of the query. This is
+        essentially a throttling mechanism and QoS, and allows a way to give
+        priority to queries over others. This value is added to the
+        mysql-default_query_delay global variable that applies to all
+        queries.
+    type: int
+  mirror_flagOUT:
+    description:
+      - Enables query mirroring. If set I(mirror_flagOUT) can be used to
+        evaluate the mirrored query against the specified chain of rules.
+    type: int
+  mirror_hostgroup:
+    description:
+      - Enables query mirroring. If set I(mirror_hostgroup) can be used to
+        mirror queries to the same or different hostgroup.
+    type: int
+  error_msg:
+    description:
+      - Query will be blocked, and the specified error_msg will be returned
+        to the client.
+    type: str
+  log:
+    description:
+      - Query will be logged.
+    type: bool
+  apply:
+    description:
+      - Used in combination with I(flagIN) and I(flagOUT) to create chains of
+        rules. Setting I(apply) to C(True) signifies the last rule to be
+        applied.
+    type: bool
+  comment:
+    description:
+      - Free form text field, usable for a descriptive comment of the query
+        rule.
+    type: str
+  state:
+    description:
+      - When C(present) - adds the rule, when C(absent) - removes the rule.
+    type: str
+    choices: [ "present", "absent" ]
+    default: present
+  force_delete:
+    description:
+      - By default we avoid deleting more than one rule in a single batch,
+        however if you need this behaviour and you're not concerned about the
+        rules deleted, you can set I(force_delete) to C(True).
+    type: bool
+    default: False
+extends_documentation_fragment:
+- community.proxysql.proxysql.managing_config
+- community.proxysql.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example adds a rule to redirect queries from a specific user to another
+# hostgroup, it saves the mysql query rule config to disk, but avoids loading
+# the mysql query rules config to runtime (this might be because several
+# rules are being added and the user wants to push the config to runtime in a
+# single batch using the community.general.proxysql_manage_config module). It
+# uses supplied credentials to connect to the proxysql admin interface.
+
+- name: Add a rule
+  proxysql_query_rules:
+    login_user: admin
+    login_password: admin
+    username: 'guest_ro'
+    match_pattern: "^SELECT.*"
+    destination_hostgroup: 1
+    active: 1
+    retries: 3
+    state: present
+    load_to_runtime: False
+
+# This example removes all rules that use the username 'guest_ro', saves the
+# mysql query rule config to disk, and dynamically loads the mysql query rule
+# config to runtime. It uses credentials in a supplied config file to connect
+# to the proxysql admin interface.
+
+- name: Remove rules
+  proxysql_query_rules:
+    config_file: '~/proxysql.cnf'
+    username: 'guest_ro'
+    state: absent
+    force_delete: true
+'''
+
+RETURN = '''
+stdout:
+    description: The mysql query rule modified or removed from proxysql
+    returned: On create/update will return the newly modified rule, in all
+              other cases will return a list of rules that match the supplied
+              criteria.
+    type: dict
+    "sample": {
+        "changed": true,
+        "msg": "Added rule to mysql_query_rules",
+        "rules": [
+            {
+                "active": "0",
+                "apply": "0",
+                "cache_ttl": null,
+                "client_addr": null,
+                "comment": null,
+                "delay": null,
+                "destination_hostgroup": 1,
+                "digest": null,
+                "error_msg": null,
+                "flagIN": "0",
+                "flagOUT": null,
+                "log": null,
+                "match_digest": null,
+                "match_pattern": null,
+                "mirror_flagOUT": null,
+                "mirror_hostgroup": null,
+                "negate_match_pattern": "0",
+                "proxy_addr": null,
+                "proxy_port": null,
+                "reconnect": null,
+                "replace_pattern": null,
+                "retries": null,
+                "rule_id": "1",
+                "schemaname": null,
+                "timeout": null,
+                "username": "guest_ro"
+            }
+        ],
+        "state": "present"
+    }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
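+# Editor's sketch (hypothetical, not module code): a deliberately simplified
+# model of the flagIN/flagOUT/apply chaining semantics documented above.
+# Real ProxySQL matching is richer (digests, negation, schema filters, etc.).
+import re
+
+
+def route(query, rules):
+    flag = 0
+    for rule in sorted(rules, key=lambda r: r["rule_id"]):
+        if rule["flagIN"] == flag and re.search(rule["match_pattern"], query):
+            if rule["apply"]:
+                return rule["destination_hostgroup"]
+            if rule.get("flagOUT") is not None:
+                flag = rule["flagOUT"]
+    return None  # no match: the user's default_hostgroup applies
+
+
+example_rules = [
+    {"rule_id": 1, "flagIN": 0, "match_pattern": r"^SELECT", "flagOUT": 10,
+     "apply": 0, "destination_hostgroup": None},
+    {"rule_id": 2, "flagIN": 10, "match_pattern": r"FOR UPDATE$",
+     "destination_hostgroup": 1, "apply": 1},
+    {"rule_id": 3, "flagIN": 10, "match_pattern": r".",
+     "destination_hostgroup": 2, "apply": 1},
+]
+route("SELECT * FROM t")             # -> 2 (reader hostgroup)
+route("SELECT * FROM t FOR UPDATE")  # -> 1 (writer hostgroup)
+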
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(cursor):
+    cursor.execute("SAVE MYSQL QUERY RULES TO DISK")
+    return True
+
+
+def load_config_to_runtime(cursor):
+    cursor.execute("LOAD MYSQL QUERY RULES TO RUNTIME")
+    return True
+
+
+class ProxyQueryRule(object):
+
+    def __init__(self, module):
+        self.state = module.params["state"]
+        self.force_delete = module.params["force_delete"]
+        self.save_to_disk = module.params["save_to_disk"]
+        self.load_to_runtime = module.params["load_to_runtime"]
+
+        config_data_keys = ["rule_id",
+                            "active",
+                            "username",
+                            "schemaname",
+                            "flagIN",
+                            "client_addr",
+                            "proxy_addr",
+                            "proxy_port",
+                            "digest",
+                            "match_digest",
+                            "match_pattern",
+                            "negate_match_pattern",
+                            "flagOUT",
+                            "replace_pattern",
+                            "destination_hostgroup",
+                            "cache_ttl",
+                            "timeout",
+                            "retries",
+                            "delay",
+                            "mirror_flagOUT",
+                            "mirror_hostgroup",
+                            "error_msg",
+                            "log",
+                            "apply",
+                            "comment"]
+
+        self.config_data = dict((k, module.params[k])
+                                for k in config_data_keys)
+
+    def check_rule_pk_exists(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `rule_count`
+               FROM mysql_query_rules
+               WHERE rule_id = %s"""
+
+        query_data = \
+            [self.config_data["rule_id"]]
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+        return (int(check_count['rule_count']) > 0)
+
+    def check_rule_cfg_exists(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `rule_count`
+               FROM mysql_query_rules"""
+
+        cols = 0
+        query_data = []
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                cols += 1
+                query_data.append(val)
+                if cols == 1:
+                    query_string += "\n WHERE " + col + " = %s"
+                else:
+                    query_string += "\n  AND " + col + " = %s"
+
+        if cols > 0:
+            cursor.execute(query_string, query_data)
+        else:
+            cursor.execute(query_string)
+        check_count = cursor.fetchone()
+        return int(check_count['rule_count'])
+
+    def get_rule_config(self, cursor, created_rule_id=None):
+        query_string = \
+            """SELECT *
+               FROM mysql_query_rules"""
+
+        if created_rule_id:
+            query_data = [created_rule_id, ]
+            query_string += "\nWHERE rule_id = %s"
+
+            cursor.execute(query_string, query_data)
+            rule = cursor.fetchone()
+        else:
+            cols = 0
+            query_data = []
+
+            for col, val in iteritems(self.config_data):
+                if val is not None:
+                    cols += 1
+                    query_data.append(val)
+                    if cols == 1:
+                        query_string += "\n WHERE " + col + " = %s"
+                    else:
+                        query_string += "\n  AND " + col + " = %s"
+
+            if cols > 0:
+                cursor.execute(query_string, query_data)
+            else:
+                cursor.execute(query_string)
+            rule = cursor.fetchall()
+
+        return rule
+
+    def create_rule_config(self, cursor):
+        query_string = \
+            """INSERT INTO mysql_query_rules ("""
+
+        cols = 0
+        query_data = []
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                cols += 1
+                query_data.append(val)
+                query_string += "\n" + col + ","
+
+        query_string = query_string[:-1]
+
+        query_string += \
+            (")\n" +
+             "VALUES (" +
+             "%s ," * cols)
+
+        query_string = query_string[:-2]
+        query_string += ")"
+
+        cursor.execute(query_string, query_data)
+        new_rule_id = cursor.lastrowid
+        return True, new_rule_id
+
+    def update_rule_config(self, cursor):
+        query_string = """UPDATE mysql_query_rules"""
+
+        cols = 0
+        query_data = []
+
+        for col, val in iteritems(self.config_data):
+            if val is not None and col != "rule_id":
+                cols += 1
+                query_data.append(val)
+                if cols == 1:
+                    query_string += "\nSET " + col + "= %s,"
+                else:
+                    query_string += "\n    " + col + " = %s,"
+
+        query_string = query_string[:-1]
+        query_string += "\nWHERE rule_id = %s"
+
+        query_data.append(self.config_data["rule_id"])
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def delete_rule_config(self, cursor):
+        query_string = \
+            """DELETE FROM mysql_query_rules"""
+
+        cols = 0
+        query_data = []
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                cols += 1
+                query_data.append(val)
+                if cols == 1:
+                    query_string += "\n WHERE " + col + " = %s"
+                else:
+                    query_string += "\n  AND " + col + " = %s"
+
+        if cols > 0:
+            cursor.execute(query_string, query_data)
+        else:
+            cursor.execute(query_string)
+        check_count = cursor.rowcount
+        return True, int(check_count)
+
+    def manage_config(self, cursor, state):
+        if state:
+            if self.save_to_disk:
+                save_config_to_disk(cursor)
+            if self.load_to_runtime:
+                load_config_to_runtime(cursor)
+
+    def create_rule(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'], new_rule_id = \
+                self.create_rule_config(cursor)
+            result['msg'] = "Added rule to mysql_query_rules"
+            self.manage_config(cursor,
+                               result['changed'])
+            result['rules'] = \
+                self.get_rule_config(cursor, new_rule_id)
+        else:
+            result['changed'] = True
+            result['msg'] = ("Rule would have been added to" +
+                             " mysql_query_rules, however" +
+                             " check_mode is enabled.")
+
+    def update_rule(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'] = \
+                self.update_rule_config(cursor)
+            result['msg'] = "Updated rule in mysql_query_rules"
+            self.manage_config(cursor,
+                               result['changed'])
+            result['rules'] = \
+                self.get_rule_config(cursor)
+        else:
+            result['changed'] = True
+            result['msg'] = ("Rule would have been updated in" +
+                             " mysql_query_rules, however" +
+                             " check_mode is enabled.")
+
+    def delete_rule(self, check_mode, result, cursor):
+        if not check_mode:
+            result['rules'] = \
+                self.get_rule_config(cursor)
+            result['changed'], result['rows_affected'] = \
+                self.delete_rule_config(cursor)
+            result['msg'] = "Deleted rule from mysql_query_rules"
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("Rule would have been deleted from" +
+                             " mysql_query_rules, however" +
+                             " check_mode is enabled.")
+
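+# Editor's sketch (hypothetical helper, not module code) of the state=present
+# decision main() makes below: a full-config match is a no-op, an existing
+# rule_id becomes an UPDATE, anything else an INSERT.
+def plan_present_action(rule, cursor):
+    if rule.check_rule_cfg_exists(cursor):
+        return "noop"
+    if rule.config_data["rule_id"] and rule.check_rule_pk_exists(cursor):
+        return "update"
+    return "create"
+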
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_user=dict(default=None, type='str'),
+            login_password=dict(default=None, no_log=True, type='str'),
+            login_host=dict(default="127.0.0.1"),
+            login_unix_socket=dict(default=None),
+            login_port=dict(default=6032, type='int'),
+            config_file=dict(default="", type='path'),
+            rule_id=dict(type='int'),
+            active=dict(type='bool'),
+            username=dict(type='str'),
+            schemaname=dict(type='str'),
+            flagIN=dict(type='int'),
+            client_addr=dict(type='str'),
+            proxy_addr=dict(type='str'),
+            proxy_port=dict(type='int'),
+            digest=dict(type='str'),
+            match_digest=dict(type='str'),
+            match_pattern=dict(type='str'),
+            negate_match_pattern=dict(type='bool'),
+            flagOUT=dict(type='int'),
+            replace_pattern=dict(type='str'),
+            destination_hostgroup=dict(type='int'),
+            cache_ttl=dict(type='int'),
+            timeout=dict(type='int'),
+            retries=dict(type='int'),
+            delay=dict(type='int'),
+            mirror_flagOUT=dict(type='int'),
+            mirror_hostgroup=dict(type='int'),
+            error_msg=dict(type='str'),
+            log=dict(type='bool'),
+            apply=dict(type='bool'),
+            comment=dict(type='str'),
+            state=dict(default='present', choices=['present',
+                                                   'absent']),
+            force_delete=dict(default=False, type='bool'),
+            save_to_disk=dict(default=True, type='bool'),
+            load_to_runtime=dict(default=True, type='bool')
+        ),
+        supports_check_mode=True
+    )
+
+    perform_checks(module)
+
+    login_user = module.params["login_user"]
+    login_password = module.params["login_password"]
+    config_file = module.params["config_file"]
+
+    cursor = None
+    try:
+        cursor, db_conn = mysql_connect(module,
+                                        login_user,
+                                        login_password,
+                                        config_file,
+                                        cursor_class='DictCursor')
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
+        )
+
+    proxysql_query_rule = ProxyQueryRule(module)
+    result = {}
+
+    result['state'] = proxysql_query_rule.state
+
+    if proxysql_query_rule.state == "present":
+        try:
+            if not proxysql_query_rule.check_rule_cfg_exists(cursor):
+                if proxysql_query_rule.config_data["rule_id"] and \
+                        proxysql_query_rule.check_rule_pk_exists(cursor):
+                    proxysql_query_rule.update_rule(module.check_mode,
+                                                    result,
+                                                    cursor)
+                else:
+                    proxysql_query_rule.create_rule(module.check_mode,
+                                                    result,
+                                                    cursor)
+            else:
+                result['changed'] = False
+                result['msg'] = ("The rule already exists in" +
+                                 " mysql_query_rules and doesn't need to be" +
+                                 " updated.")
+                result['rules'] = \
+                    proxysql_query_rule.get_rule_config(cursor)
+
+        except mysql_driver.Error as e:
+            module.fail_json(
+                msg="unable to modify rule.. %s" % to_native(e)
+            )
+
+    elif proxysql_query_rule.state == "absent":
+        try:
+            existing_rules = proxysql_query_rule.check_rule_cfg_exists(cursor)
+            if existing_rules > 0:
+                if existing_rules == 1 or \
+                        proxysql_query_rule.force_delete:
+                    proxysql_query_rule.delete_rule(module.check_mode,
+                                                    result,
+                                                    cursor)
+                else:
+                    module.fail_json(
+                        msg=("Operation would delete multiple rules;" +
+                             " use force_delete to override this")
+                    )
+            else:
+                result['changed'] = False
+                result['msg'] = ("The rule is already absent from the" +
+                                 " mysql_query_rules memory configuration")
+        except mysql_driver.Error as e:
+            module.fail_json(
+                msg="unable to remove rule.. %s" % to_native(e)
+            )
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxysql_replication_hostgroups.py b/plugins/modules/proxysql_replication_hostgroups.py new file mode 100644 index 0000000..90178c8 --- /dev/null +++ b/plugins/modules/proxysql_replication_hostgroups.py @@ -0,0 +1,381 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_replication_hostgroups +author: "Ben Mildren (@bmildren)" +short_description: Manages replication hostgroups using the proxysql admin + interface. +description: + - Each row in mysql_replication_hostgroups represent a pair of + writer_hostgroup and reader_hostgroup. ProxySQL will monitor the value of + read_only for all the servers in specified hostgroups, and based on the + value of read_only will assign the server to the writer or reader + hostgroups. +options: + writer_hostgroup: + description: + - Id of the writer hostgroup. + type: int + required: True + reader_hostgroup: + description: + - Id of the reader hostgroup. + type: int + required: True + comment: + description: + - Text field that can be used for any purposes defined by the user. + type: str + state: + description: + - When C(present) - adds the replication hostgroup, when C(absent) - + removes the replication hostgroup. + type: str + choices: [ "present", "absent" ] + default: present +extends_documentation_fragment: +- community.proxysql.proxysql.managing_config +- community.proxysql.proxysql.connectivity + +''' + +EXAMPLES = ''' +--- +# This example adds a replication hostgroup, it saves the mysql server config +# to disk, but avoids loading the mysql server config to runtime (this might be +# because several replication hostgroup are being added and the user wants to +# push the config to runtime in a single batch using the +# community.general.proxysql_manage_config module). It uses supplied credentials +# to connect to the proxysql admin interface. + +- name: Add a replication hostgroup + proxysql_replication_hostgroups: + login_user: 'admin' + login_password: 'admin' + writer_hostgroup: 1 + reader_hostgroup: 2 + state: present + load_to_runtime: False + +# This example removes a replication hostgroup, saves the mysql server config +# to disk, and dynamically loads the mysql server config to runtime. It uses +# credentials in a supplied config file to connect to the proxysql admin +# interface. + +- name: Remove a replication hostgroup + proxysql_replication_hostgroups: + config_file: '~/proxysql.cnf' + writer_hostgroup: 3 + reader_hostgroup: 4 + state: absent +''' + +RETURN = ''' +stdout: + description: The replication hostgroup modified or removed from proxysql + returned: On create/update will return the newly modified group, on delete + it will return the deleted record. 
+    type: dict
+    "sample": {
+        "changed": true,
+        "msg": "Added server to mysql_hosts",
+        "repl_group": {
+            "comment": "",
+            "reader_hostgroup": "1",
+            "writer_hostgroup": "2"
+        },
+        "state": "present"
+    }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils._text import to_native
+
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if not module.params["writer_hostgroup"] >= 0:
+        module.fail_json(
+            msg="writer_hostgroup must be an integer greater than or equal to 0"
+        )
+
+    if not module.params["reader_hostgroup"] == \
+            module.params["writer_hostgroup"]:
+        if not module.params["reader_hostgroup"] > 0:
+            module.fail_json(
+                msg=("reader_hostgroup must be an integer greater" +
+                     " than 0")
+            )
+    else:
+        module.fail_json(
+            msg="reader_hostgroup cannot equal writer_hostgroup"
+        )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(cursor):
+    cursor.execute("SAVE MYSQL SERVERS TO DISK")
+    return True
+
+
+def load_config_to_runtime(cursor):
+    cursor.execute("LOAD MYSQL SERVERS TO RUNTIME")
+    return True
+
+
+class ProxySQLReplicationHostgroup(object):
+
+    def __init__(self, module):
+        self.state = module.params["state"]
+        self.save_to_disk = module.params["save_to_disk"]
+        self.load_to_runtime = module.params["load_to_runtime"]
+        self.writer_hostgroup = module.params["writer_hostgroup"]
+        self.reader_hostgroup = module.params["reader_hostgroup"]
+        self.comment = module.params["comment"]
+
+    def check_repl_group_config(self, cursor, keys):
+        query_string = \
+            """SELECT count(*) AS `repl_groups`
+               FROM mysql_replication_hostgroups
+               WHERE writer_hostgroup = %s
+                 AND reader_hostgroup = %s"""
+
+        query_data = \
+            [self.writer_hostgroup,
+             self.reader_hostgroup]
+
+        if self.comment and not keys:
+            query_string += "\n  AND comment = %s"
+            query_data.append(self.comment)
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+        return (int(check_count['repl_groups']) > 0)
+
+    def get_repl_group_config(self, cursor):
+        query_string = \
+            """SELECT *
+               FROM mysql_replication_hostgroups
+               WHERE writer_hostgroup = %s
+                 AND reader_hostgroup = %s"""
+
+        query_data = \
+            [self.writer_hostgroup,
+             self.reader_hostgroup]
+
+        cursor.execute(query_string, query_data)
+        repl_group = cursor.fetchone()
+        return repl_group
+
+    def create_repl_group_config(self, cursor):
+        query_string = \
+            """INSERT INTO mysql_replication_hostgroups (
+               writer_hostgroup,
+               reader_hostgroup,
+               comment)
+               VALUES (%s, %s, %s)"""
+
+        query_data = \
+            [self.writer_hostgroup,
+             self.reader_hostgroup,
+             self.comment or '']
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def update_repl_group_config(self, cursor):
+        query_string = \
+            """UPDATE mysql_replication_hostgroups
+               SET comment = %s
+               WHERE writer_hostgroup = %s
+                 AND reader_hostgroup = %s"""
+
+        query_data = \
+            [self.comment,
+             self.writer_hostgroup,
+             self.reader_hostgroup]
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def delete_repl_group_config(self, cursor):
+        query_string = \
+            """DELETE FROM mysql_replication_hostgroups
+               WHERE writer_hostgroup = %s
+                 AND reader_hostgroup = %s"""
+
+        query_data = \
+            [self.writer_hostgroup,
+             self.reader_hostgroup]
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def manage_config(self, cursor, state):
+        if state:
+            if self.save_to_disk:
+                save_config_to_disk(cursor)
+            if self.load_to_runtime:
+                load_config_to_runtime(cursor)
+
+    def create_repl_group(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'] = \
+                self.create_repl_group_config(cursor)
+            result['msg'] = "Added server to mysql_hosts"
+            result['repl_group'] = \
+                self.get_repl_group_config(cursor)
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("Repl group would have been added to" +
+                             " mysql_replication_hostgroups, however" +
+                             " check_mode is enabled.")
+
+    def update_repl_group(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'] = \
+                self.update_repl_group_config(cursor)
+            result['msg'] = "Updated server in mysql_hosts"
+            result['repl_group'] = \
+                self.get_repl_group_config(cursor)
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("Repl group would have been updated in" +
+                             " mysql_replication_hostgroups, however" +
+                             " check_mode is enabled.")
+
+    def delete_repl_group(self, check_mode, result, cursor):
+        if not check_mode:
+            result['repl_group'] = \
+                self.get_repl_group_config(cursor)
+            result['changed'] = \
+                self.delete_repl_group_config(cursor)
+            result['msg'] = "Deleted server from mysql_hosts"
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("Repl group would have been deleted from" +
+                             " mysql_replication_hostgroups, however" +
+                             " check_mode is enabled.")
+
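+# Editor's sketch (hypothetical helper, not module code) of the two-step
+# check main() performs below: keys=True matches on the hostgroup pair alone
+# (does the row exist?), keys=False additionally matches the comment (is it
+# up to date?).
+def plan_present_action(group, cursor):
+    if not group.check_repl_group_config(cursor, keys=True):
+        return "create"
+    if not group.check_repl_group_config(cursor, keys=False):
+        return "update"
+    return "noop"
+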
%s" % to_native(e) + ) + + proxysql_repl_group = ProxySQLReplicationHostgroup(module) + result = {} + + result['state'] = proxysql_repl_group.state + + if proxysql_repl_group.state == "present": + try: + if not proxysql_repl_group.check_repl_group_config(cursor, + keys=True): + proxysql_repl_group.create_repl_group(module.check_mode, + result, + cursor) + else: + if not proxysql_repl_group.check_repl_group_config(cursor, + keys=False): + proxysql_repl_group.update_repl_group(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The repl group already exists in" + + " mysql_replication_hostgroups and" + + " doesn't need to be updated.") + result['repl_group'] = \ + proxysql_repl_group.get_repl_group_config(cursor) + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to modify replication hostgroup.. %s" % to_native(e) + ) + + elif proxysql_repl_group.state == "absent": + try: + if proxysql_repl_group.check_repl_group_config(cursor, + keys=True): + proxysql_repl_group.delete_repl_group(module.check_mode, + result, + cursor) + else: + result['changed'] = False + result['msg'] = ("The repl group is already absent from the" + + " mysql_replication_hostgroups memory" + + " configuration") + + except mysql_driver.Error as e: + module.fail_json( + msg="unable to delete replication hostgroup.. %s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/plugins/modules/proxysql_scheduler.py b/plugins/modules/proxysql_scheduler.py new file mode 100644 index 0000000..345b5ff --- /dev/null +++ b/plugins/modules/proxysql_scheduler.py @@ -0,0 +1,424 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# Copyright: (c) 2017, Ansible Project +# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt) + +from __future__ import absolute_import, division, print_function +__metaclass__ = type + +DOCUMENTATION = ''' +--- +module: proxysql_scheduler +author: "Ben Mildren (@bmildren)" +short_description: Adds or removes schedules from proxysql admin interface. +description: + - The M(community.general.proxysql_scheduler) module adds or removes schedules using the + proxysql admin interface. +options: + active: + description: + - A schedule with I(active) set to C(False) will be tracked in the + database, but will be never loaded in the in-memory data structures. + type: bool + default: True + interval_ms: + description: + - How often (in millisecond) the job will be started. The minimum value + for I(interval_ms) is 100 milliseconds. + type: int + default: 10000 + filename: + description: + - Full path of the executable to be executed. + type: str + required: True + arg1: + description: + - Argument that can be passed to the job. + type: str + arg2: + description: + - Argument that can be passed to the job. + type: str + arg3: + description: + - Argument that can be passed to the job. + type: str + arg4: + description: + - Argument that can be passed to the job. + type: str + arg5: + description: + - Argument that can be passed to the job. + type: str + comment: + description: + - Text field that can be used for any purposed defined by the user. + type: str + state: + description: + - When C(present) - adds the schedule, when C(absent) - removes the + schedule. 
+    type: str
+    choices: [ "present", "absent" ]
+    default: present
+  force_delete:
+    description:
+      - By default we avoid deleting more than one schedule in a single
+        batch, however if you need this behaviour and you're not concerned
+        about the schedules deleted, you can set I(force_delete) to C(True).
+    type: bool
+    default: False
+extends_documentation_fragment:
+- community.proxysql.proxysql.managing_config
+- community.proxysql.proxysql.connectivity
+
+'''
+
+EXAMPLES = '''
+---
+# This example adds a schedule, it saves the scheduler config to disk, but
+# avoids loading the scheduler config to runtime (this might be because
+# several servers are being added and the user wants to push the config to
+# runtime in a single batch using the community.general.proxysql_manage_config
+# module). It uses supplied credentials to connect to the proxysql admin
+# interface.
+
+- name: Add a schedule
+  proxysql_scheduler:
+    login_user: 'admin'
+    login_password: 'admin'
+    interval_ms: 1000
+    filename: "/opt/maintenance.py"
+    state: present
+    load_to_runtime: False
+
+# This example removes a schedule, saves the scheduler config to disk, and
+# dynamically loads the scheduler config to runtime. It uses credentials
+# in a supplied config file to connect to the proxysql admin interface.
+
+- name: Remove a schedule
+  proxysql_scheduler:
+    config_file: '~/proxysql.cnf'
+    filename: "/opt/old_script.py"
+    state: absent
+'''
+
+RETURN = '''
+stdout:
+    description: The schedule modified or removed from proxysql
+    returned: On create/update will return the newly modified schedule, on
+              delete it will return the deleted record.
+    type: dict
+    "sample": {
+        "changed": true,
+        "filename": "/opt/test.py",
+        "msg": "Added schedule to scheduler",
+        "schedules": [
+            {
+                "active": "1",
+                "arg1": null,
+                "arg2": null,
+                "arg3": null,
+                "arg4": null,
+                "arg5": null,
+                "comment": "",
+                "filename": "/opt/test.py",
+                "id": "1",
+                "interval_ms": "10000"
+            }
+        ],
+        "state": "present"
+    }
+'''
+
+from ansible.module_utils.basic import AnsibleModule
+from ansible_collections.community.proxysql.plugins.module_utils.mysql import mysql_connect, mysql_driver, mysql_driver_fail_msg
+from ansible.module_utils.six import iteritems
+from ansible.module_utils._text import to_native
+
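+# Editor's sketch (hypothetical helper, not module code) of the force_delete
+# guard used in main() below: several matching schedules abort the delete
+# unless force_delete is set.
+def can_delete(matching_schedules, force_delete):
+    if matching_schedules == 0:
+        return False  # nothing to delete; the module reports changed=False
+    return matching_schedules == 1 or force_delete
+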
+# ===========================================
+# proxysql module specific support methods.
+#
+
+
+def perform_checks(module):
+    if module.params["login_port"] < 0 \
+            or module.params["login_port"] > 65535:
+        module.fail_json(
+            msg="login_port must be a valid unix port number (0-65535)"
+        )
+
+    if module.params["interval_ms"] < 100 \
+            or module.params["interval_ms"] > 100000000:
+        module.fail_json(
+            msg="interval_ms must be between 100ms and 100000000ms"
+        )
+
+    if mysql_driver is None:
+        module.fail_json(msg=mysql_driver_fail_msg)
+
+
+def save_config_to_disk(cursor):
+    cursor.execute("SAVE SCHEDULER TO DISK")
+    return True
+
+
+def load_config_to_runtime(cursor):
+    cursor.execute("LOAD SCHEDULER TO RUNTIME")
+    return True
+
+
+class ProxySQLSchedule(object):
+
+    def __init__(self, module):
+        self.state = module.params["state"]
+        self.force_delete = module.params["force_delete"]
+        self.save_to_disk = module.params["save_to_disk"]
+        self.load_to_runtime = module.params["load_to_runtime"]
+        self.active = module.params["active"]
+        self.interval_ms = module.params["interval_ms"]
+        self.filename = module.params["filename"]
+
+        config_data_keys = ["arg1",
+                            "arg2",
+                            "arg3",
+                            "arg4",
+                            "arg5",
+                            "comment"]
+
+        self.config_data = dict((k, module.params[k])
+                                for k in config_data_keys)
+
+    def check_schedule_config(self, cursor):
+        query_string = \
+            """SELECT count(*) AS `schedule_count`
+               FROM scheduler
+               WHERE active = %s
+                 AND interval_ms = %s
+                 AND filename = %s"""
+
+        query_data = \
+            [self.active,
+             self.interval_ms,
+             self.filename]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                query_data.append(val)
+                query_string += "\n  AND " + col + " = %s"
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.fetchone()
+        return int(check_count['schedule_count'])
+
+    def get_schedule_config(self, cursor):
+        query_string = \
+            """SELECT *
+               FROM scheduler
+               WHERE active = %s
+                 AND interval_ms = %s
+                 AND filename = %s"""
+
+        query_data = \
+            [self.active,
+             self.interval_ms,
+             self.filename]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                query_data.append(val)
+                query_string += "\n  AND " + col + " = %s"
+
+        cursor.execute(query_string, query_data)
+        schedule = cursor.fetchall()
+        return schedule
+
+    def create_schedule_config(self, cursor):
+        query_string = \
+            """INSERT INTO scheduler (
+               active,
+               interval_ms,
+               filename"""
+
+        cols = 0
+        query_data = \
+            [self.active,
+             self.interval_ms,
+             self.filename]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                cols += 1
+                query_data.append(val)
+                query_string += ",\n" + col
+
+        query_string += \
+            (")\n" +
+             "VALUES (%s, %s, %s" +
+             ", %s" * cols +
+             ")")
+
+        cursor.execute(query_string, query_data)
+        return True
+
+    def delete_schedule_config(self, cursor):
+        query_string = \
+            """DELETE FROM scheduler
+               WHERE active = %s
+                 AND interval_ms = %s
+                 AND filename = %s"""
+
+        query_data = \
+            [self.active,
+             self.interval_ms,
+             self.filename]
+
+        for col, val in iteritems(self.config_data):
+            if val is not None:
+                query_data.append(val)
+                query_string += "\n  AND " + col + " = %s"
+
+        cursor.execute(query_string, query_data)
+        check_count = cursor.rowcount
+        return True, int(check_count)
+
+    def manage_config(self, cursor, state):
+        if state:
+            if self.save_to_disk:
+                save_config_to_disk(cursor)
+            if self.load_to_runtime:
+                load_config_to_runtime(cursor)
+
+    def create_schedule(self, check_mode, result, cursor):
+        if not check_mode:
+            result['changed'] = \
+                self.create_schedule_config(cursor)
+            result['msg'] = "Added schedule to scheduler"
+            result['schedules'] = \
+                self.get_schedule_config(cursor)
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("Schedule would have been added to" +
+                             " scheduler, however check_mode" +
+                             " is enabled.")
+
+    def delete_schedule(self, check_mode, result, cursor):
+        if not check_mode:
+            result['schedules'] = \
+                self.get_schedule_config(cursor)
+            result['changed'], result['rows_affected'] = \
+                self.delete_schedule_config(cursor)
+            result['msg'] = "Deleted schedule from scheduler"
+            self.manage_config(cursor,
+                               result['changed'])
+        else:
+            result['changed'] = True
+            result['msg'] = ("Schedule would have been deleted from" +
+                             " scheduler, however check_mode is" +
+                             " enabled.")
+
+# ===========================================
+# Module execution.
+#
+
+
+def main():
+    module = AnsibleModule(
+        argument_spec=dict(
+            login_user=dict(default=None, type='str'),
+            login_password=dict(default=None, no_log=True, type='str'),
+            login_host=dict(default="127.0.0.1"),
+            login_unix_socket=dict(default=None),
+            login_port=dict(default=6032, type='int'),
+            config_file=dict(default="", type='path'),
+            active=dict(default=True, type='bool'),
+            interval_ms=dict(default=10000, type='int'),
+            filename=dict(required=True, type='str'),
+            arg1=dict(type='str'),
+            arg2=dict(type='str'),
+            arg3=dict(type='str'),
+            arg4=dict(type='str'),
+            arg5=dict(type='str'),
+            comment=dict(type='str'),
+            state=dict(default='present', choices=['present',
+                                                   'absent']),
+            force_delete=dict(default=False, type='bool'),
+            save_to_disk=dict(default=True, type='bool'),
+            load_to_runtime=dict(default=True, type='bool')
+        ),
+        supports_check_mode=True
+    )
+
+    perform_checks(module)
+
+    login_user = module.params["login_user"]
+    login_password = module.params["login_password"]
+    config_file = module.params["config_file"]
+
+    cursor = None
+    try:
+        cursor, db_conn = mysql_connect(module,
+                                        login_user,
+                                        login_password,
+                                        config_file,
+                                        cursor_class='DictCursor')
+    except mysql_driver.Error as e:
+        module.fail_json(
+            msg="unable to connect to ProxySQL Admin Module.. %s" % to_native(e)
+        )
+
+    proxysql_schedule = ProxySQLSchedule(module)
+    result = {}
+
+    result['state'] = proxysql_schedule.state
+    result['filename'] = proxysql_schedule.filename
+
+    if proxysql_schedule.state == "present":
+        try:
+            if proxysql_schedule.check_schedule_config(cursor) <= 0:
+                proxysql_schedule.create_schedule(module.check_mode,
+                                                  result,
+                                                  cursor)
+            else:
+                result['changed'] = False
+                result['msg'] = ("The schedule already exists and doesn't" +
+                                 " need to be updated.")
+                result['schedules'] = \
+                    proxysql_schedule.get_schedule_config(cursor)
+        except mysql_driver.Error as e:
+            module.fail_json(
+                msg="unable to modify schedule.. %s" % to_native(e)
+            )
+
+    elif proxysql_schedule.state == "absent":
+        try:
+            existing_schedules = \
+                proxysql_schedule.check_schedule_config(cursor)
+            if existing_schedules > 0:
+                if existing_schedules == 1 or proxysql_schedule.force_delete:
+                    proxysql_schedule.delete_schedule(module.check_mode,
+                                                      result,
+                                                      cursor)
+                else:
+                    module.fail_json(
+                        msg=("Operation would delete multiple records;" +
+                             " use force_delete to override this")
+                    )
+            else:
+                result['changed'] = False
+                result['msg'] = ("The schedule is already absent from the" +
+                                 " memory configuration")
+        except mysql_driver.Error as e:
+            module.fail_json(
+                msg="unable to remove schedule.. %s" % to_native(e)
+            )
+
+    module.exit_json(**result)
+
+
+if __name__ == '__main__':
+    main()
%s" % to_native(e) + ) + + module.exit_json(**result) + + +if __name__ == '__main__': + main() diff --git a/roles/proxysql/.yamllint b/roles/proxysql/.yamllint new file mode 100644 index 0000000..a027086 --- /dev/null +++ b/roles/proxysql/.yamllint @@ -0,0 +1,11 @@ +--- +extends: default + +rules: + braces: + max-spaces-inside: 1 + level: error + brackets: + max-spaces-inside: 1 + level: error + line-length: disable diff --git a/roles/proxysql/README.md b/roles/proxysql/README.md new file mode 100644 index 0000000..5cc42b1 --- /dev/null +++ b/roles/proxysql/README.md @@ -0,0 +1,43 @@ +Ansible Role: ProxySQL +====================== + +This role installs, and configures ProxySQL. + +Requirements +------------ + +None + +Role Variables +-------------- + +As with all roles designed in Data Platforms, the interface to variables in this role should only be via the role defaults, and it shouldn't be necessary to override the role vars. + +A full list of defaults and their values can be found in the `defaults/main.yml`. + +Dependencies +------------ + +None + +Example Playbook +---------------- + +``` + - hosts: servers + tasks: + - import_role: + name: role_mysql_proxysql + tags: + - proxysql +``` + +License +------- + +BSD + +Author Information +------------------ + +Ben Mildren diff --git a/roles/proxysql/defaults/main.yml b/roles/proxysql/defaults/main.yml new file mode 100644 index 0000000..9378156 --- /dev/null +++ b/roles/proxysql/defaults/main.yml @@ -0,0 +1,171 @@ +--- +### proxysql install +proxysql_download_src: https://github.com/sysown/proxysql/releases/download +proxysql_version: 2.0.10 +proxysql_mysql_client_version: 5.7 +proxysql_mysql_use_custom_build: false +proxysql_force_restart: false + +proxysql_user: proxysql +proxysql_group: proxysql +proxysql_datadir: /var/lib/proxysql + +proxysql_restart_missing_heartbeats: 10 + +### admin variables +proxysql_admin_user: admin +proxysql_admin_password: admin +proxysql_admin_stats_user: stats +proxysql_admin_stats_password: stats +proxysql_admin_bind_address: 0.0.0.0 +proxysql_admin_port: 6032 +proxysql_admin_socket: /tmp/proxysql_admin.sock +proxysql_admin_read_only: false +proxysql_admin_refresh_interval: 2000 +proxysql_admin_hash_passwords: true + +# cluster +proxysql_admin_cluster_username: +proxysql_admin_cluster_password: +proxysql_admin_cluster_check_interval_ms: 1000 +proxysql_admin_cluster_check_status_frequency: 10 +proxysql_admin_cluster_proxysql_servers_diffs_before_sync: 3 +proxysql_admin_cluster_proxysql_servers_save_to_disk: true +proxysql_admin_checksum_mysql_query_rules: true +proxysql_admin_cluster_mysql_query_rules_diffs_before_sync: 3 +proxysql_admin_cluster_mysql_query_rules_save_to_disk: true +proxysql_admin_checksum_mysql_servers: true +proxysql_admin_cluster_mysql_servers_diffs_before_sync: 3 +proxysql_admin_cluster_mysql_servers_save_to_disk: true +proxysql_admin_checksum_mysql_users: true +proxysql_admin_cluster_mysql_users_diffs_before_sync: 3 +proxysql_admin_cluster_mysql_users_save_to_disk: true + +# historical stats +proxysql_admin_stats_system_cpu: 60 +proxysql_admin_stats_system_memory: 60 +proxysql_admin_stats_mysql_connection_pool: 60 +proxysql_admin_stats_mysql_connections: 60 +proxysql_admin_stats_mysql_query_cache: 60 + +# web interface +proxysql_admin_web_enabled: false +proxysql_admin_web_port: 6080 + + +### mysql variables +proxysql_mysql_bind_address: 0.0.0.0 +proxysql_mysql_port: 6033 +proxysql_mysql_socket: /tmp/proxysql.sock + +# connection pool +proxysql_mysql_connect_retries_delay: 1 
+proxysql_mysql_connect_retries_on_failure: 10 +proxysql_mysql_connect_timeout_server: 3000 +proxysql_mysql_connect_timeout_server_max: 10000 +proxysql_mysql_connection_delay_multiplex_ms: 0 +proxysql_mysql_connection_max_age_ms: 0 +proxysql_mysql_connpoll_reset_queue_length: 50 +proxysql_mysql_default_max_latency_ms: 1000 +proxysql_mysql_free_connections_pct: 10 +proxysql_mysql_max_connections: 2048 +proxysql_mysql_multiplexing: true +proxysql_mysql_ping_interval_server_msec: 120000 +proxysql_mysql_ping_timeout_server: 500 +proxysql_mysql_poll_timeout: 2000 +proxysql_mysql_poll_timeout_on_failure: 100 +proxysql_mysql_session_idle_ms: 1000 +proxysql_mysql_session_idle_show_processlist: true +proxysql_mysql_sessions_sort: true +proxysql_mysql_shun_on_failures: 5 +proxysql_mysql_shun_recovery_time_sec: 10 +proxysql_mysql_stacksize: 1048576 +proxysql_mysql_threads: 4 +proxysql_mysql_threshold_query_length: 524288 +proxysql_mysql_threshold_resultset_size: 4194304 +proxysql_mysql_throttle_connections_per_sec_to_hostgroup: 1000000 +proxysql_mysql_throttle_max_bytes_per_second_to_client: 2147483647 +proxysql_mysql_throttle_ratio_server_to_client: 0 + +# session +proxysql_mysql_client_found_rows: true +proxysql_mysql_default_charset: utf8 +proxysql_mysql_default_query_delay: 0 +proxysql_mysql_default_query_timeout: 36000000 +proxysql_mysql_default_schema: information_schema +proxysql_mysql_default_sql_mode: +proxysql_mysql_default_time_zone: SYSTEM +proxysql_mysql_init_connect: +proxysql_mysql_max_allowed_packet: 4194304 +proxysql_mysql_max_transaction_time: 14400000 +proxysql_mysql_query_retries_on_failure: 1 +proxysql_mysql_server_capabilities: 45578 +proxysql_mysql_server_version: 5.5.30 +proxysql_mysql_mysql_wait_timeout: 28800000 +proxysql_mysql_kill_backend_connection_when_disconnect: true + +# ssl +proxysql_mysql_ssl_p2s_ca: +proxysql_mysql_ssl_p2s_cert: +proxysql_mysql_ssl_p2s_cipher: +proxysql_mysql_ssl_p2s_key: + +# query processing +proxysql_mysql_query_processor_iterations: 0 +proxysql_mysql_query_processor_regex: 1 + +# autocommit +proxysql_mysql_autocommit_false_is_transaction: false +proxysql_mysql_autocommit_false_not_reusable: false +proxysql_mysql_enforce_autocommit_on_reads: false +proxysql_mysql_forward_autocommit: false + +# prepared statements +proxysql_mysql_max_stmts_cache: 10000 +proxysql_mysql_max_stmts_per_connection: 20 + +# query cache +proxysql_mysql_query_cache_size_mb: 256 + +# mirroring +proxysql_mysql_mirror_max_concurrency: 16 +proxysql_mysql_mirror_max_queue_length: 32000 + +# monitor +proxysql_mysql_monitor_username: monitor +proxysql_mysql_monitor_password: monitor + +proxysql_mysql_monitor_connect_interval: 60000 +proxysql_mysql_monitor_connect_timeout: 600 +proxysql_mysql_monitor_enabled: true +proxysql_mysql_monitor_groupreplication_healthcheck_interval: 5000 +proxysql_mysql_monitor_groupreplication_healthcheck_timeout: 800 +proxysql_mysql_monitor_history: 600000 +proxysql_mysql_monitor_ping_interval: 10000 +proxysql_mysql_monitor_ping_max_failures: 3 +proxysql_mysql_monitor_ping_timeout: 1000 +proxysql_mysql_monitor_query_interval: 60000 +proxysql_mysql_monitor_query_timeout: 100 +proxysql_mysql_monitor_read_only_interval: 1500 +proxysql_mysql_monitor_read_only_max_timeout_count: 3 +proxysql_mysql_monitor_read_only_timeout: 500 +proxysql_mysql_monitor_replication_lag_interval: 10000 +proxysql_mysql_monitor_replication_lag_timeout: 1000 +proxysql_mysql_monitor_replication_lag_use_percona_heartbeat: +proxysql_mysql_monitor_slave_lag_when_null: 60 
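+# The monitor credentials above must match an account that exists on every
+# backend MySQL server. A minimal sketch of creating that backend account
+# with the usual monitoring grants (values are illustrative):
+#
+#   - name: create proxysql monitor account on each backend
+#     mysql_user:
+#       name: monitor
+#       password: monitor
+#       host: '%'
+#       priv: '*.*:USAGE,REPLICATION CLIENT'
+#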
+proxysql_mysql_monitor_wait_timeout: true
+proxysql_mysql_monitor_writer_is_also_reader: true
+
+# stats and logging
+proxysql_mysql_commands_stats: true
+proxysql_mysql_eventslog_filename:
+proxysql_mysql_eventslog_filesize: 104857600
+proxysql_mysql_hostgroup_manager_verbose: 0
+proxysql_mysql_long_query_time: 1000
+proxysql_mysql_query_digests: true
+proxysql_mysql_query_digests_lowercase: false
+proxysql_mysql_query_digests_max_digest_length: 2048
+proxysql_mysql_query_digests_max_query_length: 65000
+proxysql_mysql_stats_time_backend_query: false
+proxysql_mysql_stats_time_query_processor: false
+proxysql_mysql_verbose_query_error: false
diff --git a/roles/proxysql/handlers/main.yml b/roles/proxysql/handlers/main.yml
new file mode 100644
index 0000000..3ed9a13
--- /dev/null
+++ b/roles/proxysql/handlers/main.yml
@@ -0,0 +1,33 @@
+---
+- name: proxysql | handler | manage admin config
+  proxysql_global_variables:
+    config_file: "~/.my.cnf"
+    variable: "admin-{{ item.value.variable }}"
+    value: "{{ item.value.variable_value }}"
+  loop: "{{ proxysql_admin_variables|dict2items }}"
+  listen: update proxysql config
+
+- name: proxysql | handler | manage mysql config
+  proxysql_global_variables:
+    config_file: "~/.my.cnf"
+    variable: "mysql-{{ item.value.variable }}"
+    value: "{{ item.value.variable_value }}"
+  loop: "{{ proxysql_mysql_variables|dict2items }}"
+  listen: update proxysql config
+
+- name: proxysql | handler | manage mysql options
+  proxysql_global_variables:
+    config_file: "~/.my.cnf"
+    variable: "mysql-{{ item.value.variable }}"
+    value: "{{ item.value.variable_value }}"
+    load_to_runtime: false
+    save_to_disk: true
+  loop: "{{ proxysql_mysql_options|dict2items }}"
+  listen: update proxysql config
+
+- name: proxysql | handler | restart proxysql
+  service:
+    name: proxysql
+    state: restarted
+  when: proxysql_force_restart
+  listen: restart proxysql
diff --git a/roles/proxysql/meta/main.yml b/roles/proxysql/meta/main.yml
new file mode 100644
index 0000000..c0afeec
--- /dev/null
+++ b/roles/proxysql/meta/main.yml
@@ -0,0 +1,20 @@
+---
+galaxy_info:
+  author: Ben Mildren
+  description: Ansible role to install and configure ProxySQL
+  company: DigitalOcean
+
+  license: BSD
+  min_ansible_version: 2.9
+
+  platforms:
+    - name: Ubuntu
+      versions:
+        - xenial
+        - bionic
+
+  galaxy_tags:
+    - proxysql
+    - mysql
+
+dependencies: []
diff --git a/roles/proxysql/molecule/default/converge.yml b/roles/proxysql/molecule/default/converge.yml
new file mode 100644
index 0000000..662cb23
--- /dev/null
+++ b/roles/proxysql/molecule/default/converge.yml
@@ -0,0 +1,5 @@
+---
+- name: Converge
+  hosts: all
+  roles:
+    - role: proxysql
diff --git a/roles/proxysql/molecule/default/molecule.yml b/roles/proxysql/molecule/default/molecule.yml
new file mode 100644
index 0000000..c96fea7
--- /dev/null
+++ b/roles/proxysql/molecule/default/molecule.yml
@@ -0,0 +1,46 @@
+---
+dependency:
+  name: galaxy
+driver:
+  name: docker
+lint: |
+  set -e
+  yamllint .
+  ansible-lint .
+  flake8
+platforms:
+  - name: test-proxysql-01
+    image: "geerlingguy/docker-${MOLECULE_DISTRO:-ubuntu1804}-ansible:latest"
+    command: ${MOLECULE_DOCKER_COMMAND:-""}
+    volumes:
+      - /sys/fs/cgroup:/sys/fs/cgroup:ro
+    privileged: true
+    pre_build_image: true
+provisioner:
+  name: ansible
+scenario:
+  name: default
+  converge_sequence:
+    - dependency
+    - create
+    - prepare
+    - converge
+  test_sequence:
+    - lint
+    - destroy
+    - dependency
+    - syntax
+    - create
+    - prepare
+    - converge
+    - idempotence
+    # - side_effect
+    - verify
+    - destroy
verifier:
+  name: testinfra
+  env:
+    PYTHONWARNINGS: "ignore:.*U.*mode is deprecated:DeprecationWarning"
+  options:
+    # show which tests were executed in test output
+    v: 1
diff --git a/roles/proxysql/molecule/default/prepare.yml b/roles/proxysql/molecule/default/prepare.yml
new file mode 100644
index 0000000..07f5235
--- /dev/null
+++ b/roles/proxysql/molecule/default/prepare.yml
@@ -0,0 +1,20 @@
+---
+- name: Prepare
+  hosts: all
+  tasks:
+    - name: fix trusty image
+      block:
+
+        - name: remove removed repo
+          file:
+            name: '/etc/apt/sources.list.d/jonathonf-python-2_7-trusty.list'
+            state: absent
+
+        - name: install python-apt
+          apt:
+            name:
+              - python-apt
+              - python-pkg-resources=3.3-1ubuntu1
+            state: present
+
+      when: ansible_lsb.major_release|int == 14
diff --git a/roles/proxysql/molecule/default/tests/test_default.py b/roles/proxysql/molecule/default/tests/test_default.py
new file mode 100644
index 0000000..25a93c3
--- /dev/null
+++ b/roles/proxysql/molecule/default/tests/test_default.py
@@ -0,0 +1,81 @@
+import os
+import pytest
+
+import testinfra.utils.ansible_runner
+
+testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
+    os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
+
+
+def test_hosts_file(host):
+    f = host.file('/etc/hosts')
+
+    assert f.exists
+    assert f.user == 'root'
+    assert f.group == 'root'
+
+
+proxysql_user_attributes = ("user_name,"
+                            "group_name")
+
+
+@pytest.mark.parametrize(proxysql_user_attributes, [
+    ("proxysql", "proxysql"),
+])
+def test_proxysql_users(host,
+                        user_name,
+                        group_name):
+    u = host.user(user_name)
+
+    assert u.exists
+    assert u.group == group_name
+
+
+proxysql_file_attributes = ("proxysql_file,"
+                            "proxysql_file_user,"
+                            "proxysql_file_group,"
+                            "proxysql_file_mode")
+
+
+@pytest.mark.parametrize(proxysql_file_attributes, [
+    ("/root/.my.cnf", None, None, 0o600),
+    ("/etc/proxysql.cnf", "proxysql", "proxysql", 0o644),
+])
+def test_proxysql_files(host,
+                        proxysql_file,
+                        proxysql_file_user,
+                        proxysql_file_group,
+                        proxysql_file_mode):
+    f = host.file(proxysql_file)
+
+    assert f.exists
+    assert f.is_file
+    if proxysql_file_user:
+        assert f.user == proxysql_file_user
+    if proxysql_file_group:
+        assert f.group == proxysql_file_group
+    if proxysql_file_mode:
+        assert f.mode == proxysql_file_mode
+
+
+@pytest.mark.parametrize("proxysql_package", [
+    ("percona-server-client-5.7"),
+    ("proxysql"),
+])
+def test_proxysql_packages(host,
+                           proxysql_package):
+
+    pkg = host.package(proxysql_package)
+
+    assert pkg.is_installed
+
+
+@pytest.mark.parametrize("proxysql_service", [
+    ("proxysql"),
+])
+def test_proxysql_services(host,
+                           proxysql_service):
+    svc = host.service(proxysql_service)
+
+    assert svc.is_enabled
+    assert svc.is_running
diff --git a/roles/proxysql/tasks/config.yml b/roles/proxysql/tasks/config.yml
new file mode 100644
index 0000000..db7038f
--- /dev/null
+++ b/roles/proxysql/tasks/config.yml
@@ -0,0 +1,32 @@
+---
+- name: proxysql | config | copy client my.cnf
+  template:
+    src: client.my.cnf.j2
+    dest: ~/.my.cnf
+    mode: 0600
+
+- name: proxysql | config | copy proxysql config
+  template:
+    src: proxysql.cnf.j2
+    dest: /etc/proxysql.cnf
+    owner: "{{ proxysql_user }}"
+    group: "{{ proxysql_group }}"
+    mode: 0644
+  notify:
+    - update proxysql config
+    - restart proxysql
+
+- name: proxysql | config | enable and start proxysql
+  service:
+    name: proxysql
+    state: started
+    enabled: true
+
+- name: proxysql | config | wait for proxysql
+  wait_for:
+    port: "{{ proxysql_admin_port }}"
+    state: started
+    timeout: 30
+
+- name: proxysql | config | update dynamic config
+  meta: flush_handlers
diff --git a/roles/proxysql/tasks/install.yml b/roles/proxysql/tasks/install.yml
new file mode 100644
index 0000000..538841a
--- /dev/null
+++ b/roles/proxysql/tasks/install.yml
@@ -0,0 +1,90 @@
+---
+- name: proxysql | install | update apt cache
+  apt:
+    cache_valid_time: 14400
+  changed_when: false
+  ignore_errors: "{{ ansible_check_mode }}"
+
+- name: proxysql | install | install prereqs (trusty specific)
+  block:
+
+    - name: proxysql | install | install platform specific prereqs
+      apt:
+        name: "{{ lookup('vars', 'proxysql_' + ansible_lsb.codename + '_prereqs') }}"
+        state: present
+      environment:
+        DEBIAN_FRONTEND: noninteractive
+
+  rescue:
+
+    - name: proxysql | install | handle the error if we failed in check mode with python-apt uninstalled
+      assert:
+        that: ansible_failed_result.msg is match("python-apt must be installed to use check mode.*")
+        fail_msg: "unknown error during package install"
+        success_msg: "running in check mode without python-apt installed, ignoring error"
+
+  when: ansible_lsb.major_release|int == 14
+
+
+- name: proxysql | install | install prereqs (xenial and later)
+  block:
+
+    - name: proxysql | install | install prereqs
+      apt:
+        name: "{{ proxysql_prereqs }}"
+        state: present
+      environment:
+        DEBIAN_FRONTEND: noninteractive
+
+    - name: proxysql | install | install python packages
+      pip:
+        name: "{{ proxysql_python_packages }}"
+        executable: pip3
+
+  rescue:
+
+    - name: proxysql | install | handle the error if we failed in check mode with python-apt uninstalled
+      assert:
+        that: ansible_failed_result.msg is match("python-apt must be installed to use check mode.*")
+        fail_msg: "unknown error during package install"
+        success_msg: "running in check mode without python-apt installed, ignoring error"
+
+  when: ansible_lsb.major_release|int > 14
+
+- name: proxysql | install | install proxysql
+  block:
+
+    - name: proxysql | install | add apt signing key for percona
+      apt_key:
+        keyserver: keyserver.ubuntu.com
+        id: 4D1BB29D63D98E422B2113B19334A25F8507EFA5
+        state: present
+
+    - name: proxysql | install | add percona repositories
+      apt_repository:
+        repo: "{{ item }}"
+        state: present
+      loop: "{{ percona_mysql_repos }}"
+
+    - name: proxysql | install | install packages required by proxysql
+      apt:
+        name: "{{ proxysql_additional_packages }}"
+        state: present
+      environment:
+        DEBIAN_FRONTEND: noninteractive
+
+    - name: proxysql | install | install proxysql release
+      apt:
+        deb: "{{ proxysql_release }}"
+        state: present
+      notify:
+        - restart proxysql
+
+  rescue:
+
+    - name: proxysql | install | handle the error if we failed in check mode with python-apt uninstalled
+      assert:
+        that: ansible_failed_result is search("python-apt must be installed to use check mode")
+        fail_msg: "unknown error during package install"
+        success_msg: "running in check mode without python-apt installed, ignoring error"
diff --git a/roles/proxysql/tasks/main.yml b/roles/proxysql/tasks/main.yml
new file mode 100644
index 0000000..9836f36
--- /dev/null
+++ b/roles/proxysql/tasks/main.yml
@@ -0,0 +1,22 @@
+---
+- import_tasks: setvars.yml
+  tags:
+    - always
+
+- block:
+
+    - import_tasks: users.yml
+      tags:
+        - users
+        - config
+    - import_tasks: install.yml
+      when: not proxysql_mysql_use_custom_build
+      tags:
+        - install
+
+  become: true
+  become_user: root
+
+- import_tasks: config.yml
+  tags:
+    - config
diff --git a/roles/proxysql/tasks/setvars.yml b/roles/proxysql/tasks/setvars.yml
new file mode 100644
index 0000000..fe26b48
--- /dev/null
+++ b/roles/proxysql/tasks/setvars.yml
@@ -0,0 +1,8 @@
+---
+- name: proxysql | setvars | set users
+  set_fact:
+    admin_credentials_list: "{{ admin_credentials_list | default([]) + [ item.username + ':' + item.password ] }}"
+  loop:
+    - { username: "{{ proxysql_admin_user }}", password: "{{ proxysql_admin_password }}" }
+    - { username: "{{ proxysql_admin_cluster_username }}", password: "{{ proxysql_admin_cluster_password }}" }
+  when: item.username and item.password
diff --git a/roles/proxysql/tasks/users.yml b/roles/proxysql/tasks/users.yml
new file mode 100644
index 0000000..eb8ea67
--- /dev/null
+++ b/roles/proxysql/tasks/users.yml
@@ -0,0 +1,16 @@
+---
+- name: proxysql | users | create group for proxysql
+  group:
+    name: "{{ proxysql_group }}"
+    system: true
+    state: present
+
+- name: proxysql | users | create user for proxysql
+  user:
+    name: "{{ proxysql_user }}"
+    system: true
+    group: "{{ proxysql_group }}"
+    comment: "ProxySQL service"
+    home: "{{ proxysql_datadir }}"
+    shell: /usr/sbin/nologin
+    state: present
diff --git a/roles/proxysql/templates/client.my.cnf.j2 b/roles/proxysql/templates/client.my.cnf.j2
new file mode 100644
index 0000000..12d44d5
--- /dev/null
+++ b/roles/proxysql/templates/client.my.cnf.j2
@@ -0,0 +1,6 @@
+### {{ ansible_managed }}
+[client]
+user={{ proxysql_admin_user }}
+password={{ proxysql_admin_password }}
+host={{ proxysql_admin_bind_address }}
+port={{ proxysql_admin_port }}
diff --git a/roles/proxysql/templates/proxysql.cnf.j2 b/roles/proxysql/templates/proxysql.cnf.j2
new file mode 100644
index 0000000..28ce54c
--- /dev/null
+++ b/roles/proxysql/templates/proxysql.cnf.j2
@@ -0,0 +1,21 @@
+#jinja2: lstrip_blocks: "true"
+datadir="{{ proxysql_datadir }}"
+restart_on_missing_heartbeats={{ proxysql_restart_missing_heartbeats }}
+
+admin_variables=
+{
+{% for config_item in proxysql_admin_variables|dictsort %}
+    {% if config_item.1.variable_value is not none %}
+    {{ config_item.1.variable }}={{ config_item.1.variable_value | to_json }}
+    {% endif %}
+{% endfor %}
+}
+
+mysql_variables=
+{
+{% for config_item in proxysql_mysql_variables|dictsort %}
+    {% if config_item.1.variable_value is not none %}
+    {{ config_item.1.variable }}={{ config_item.1.variable_value | to_json }}
+    {% endif %}
+{% endfor %}
+}
diff --git a/roles/proxysql/vars/main.yml b/roles/proxysql/vars/main.yml
new file mode 100644
index 0000000..ffc02dc
--- /dev/null
+++ b/roles/proxysql/vars/main.yml
@@ -0,0 +1,385 @@
+---
+### percona repo
+percona_mysql_repos:
+  - deb http://repo.percona.com/apt {{ ansible_lsb.codename }} main
+  - deb-src http://repo.percona.com/apt {{ ansible_lsb.codename }} main
+
+### platform specific packages
+proxysql_trusty_prereqs:
+  - libgnutls28-dev
+
+proxysql_prereqs:
+  - dirmngr
+  - python-setuptools
+  - python3-pip
+  - python3-virtualenv
+
+### proxysql required packages
+proxysql_release: "{{ proxysql_download_src }}/v{{ proxysql_version }}/proxysql_{{ proxysql_version }}-{{ ansible_lsb.id | lower }}{{ ansible_lsb.major_release }}_amd64.deb"
+
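+# With the role defaults on an Ubuntu 18.04 target (ansible_lsb.id "Ubuntu",
+# ansible_lsb.major_release "18"), the template above resolves to, for example:
+# https://github.com/sysown/proxysql/releases/download/v2.0.10/proxysql_2.0.10-ubuntu18_amd64.deb
+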
+proxysql_additional_packages: + - percona-server-client-{{ proxysql_mysql_client_version }} + - python-mysqldb + +proxysql_python_packages: + - pymysql + +proxysql_admin_variables: + admin_credentials: + variable: "admin_credentials" + variable_value: "{{ admin_credentials_list | join(';') }}" + checksum_mysql_query_rules: + variable: "checksum_mysql_query_rules" + variable_value: "{{ proxysql_admin_checksum_mysql_query_rules | to_json }}" + checksum_mysql_servers: + variable: "checksum_mysql_servers" + variable_value: "{{ proxysql_admin_checksum_mysql_servers | to_json }}" + checksum_mysql_users: + variable: "checksum_mysql_users" + variable_value: "{{ proxysql_admin_checksum_mysql_users | to_json }}" + cluster_check_interval_ms: + variable: "cluster_check_interval_ms" + variable_value: "{{ proxysql_admin_cluster_check_interval_ms }}" + cluster_check_status_frequency: + variable: "cluster_check_status_frequency" + variable_value: "{{ proxysql_admin_cluster_check_status_frequency }}" + cluster_mysql_query_rules_diffs_before_sync: + variable: "cluster_mysql_query_rules_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_mysql_query_rules_diffs_before_sync }}" + cluster_mysql_query_rules_save_to_disk: + variable: "cluster_mysql_query_rules_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_mysql_query_rules_save_to_disk | to_json }}" + cluster_mysql_servers_diffs_before_sync: + variable: "cluster_mysql_servers_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_mysql_servers_diffs_before_sync }}" + cluster_mysql_servers_save_to_disk: + variable: "cluster_mysql_servers_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_mysql_servers_save_to_disk | to_json }}" + cluster_mysql_users_diffs_before_sync: + variable: "cluster_mysql_users_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_mysql_users_diffs_before_sync }}" + cluster_mysql_users_save_to_disk: + variable: "cluster_mysql_users_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_mysql_users_save_to_disk | to_json }}" + cluster_proxysql_servers_diffs_before_sync: + variable: "cluster_proxysql_servers_diffs_before_sync" + variable_value: "{{ proxysql_admin_cluster_proxysql_servers_diffs_before_sync }}" + cluster_proxysql_servers_save_to_disk: + variable: "cluster_proxysql_servers_save_to_disk" + variable_value: "{{ proxysql_admin_cluster_proxysql_servers_save_to_disk | to_json }}" + hash_passwords: + variable: "hash_passwords" + variable_value: "{{ proxysql_admin_hash_passwords | to_json }}" + mysql_ifaces: + variable: "mysql_ifaces" + variable_value: "{{ proxysql_admin_bind_address }}:{{ proxysql_admin_port }};{{ proxysql_admin_socket }}" + read_only: + variable: "read_only" + variable_value: "{{ proxysql_admin_read_only | to_json }}" + refresh_interval: + variable: "refresh_interval" + variable_value: "{{ proxysql_admin_refresh_interval }}" + stats_credentials: + variable: "stats_credentials" + variable_value: "{{ proxysql_admin_stats_user }}:{{ proxysql_admin_stats_password }}" + stats_mysql_connection_pool: + variable: "stats_mysql_connection_pool" + variable_value: "{{ proxysql_admin_stats_mysql_connection_pool }}" + stats_mysql_connections: + variable: "stats_mysql_connections" + variable_value: "{{ proxysql_admin_stats_mysql_connections }}" + stats_mysql_query_cache: + variable: "stats_mysql_query_cache" + variable_value: "{{ proxysql_admin_stats_mysql_query_cache }}" + stats_system_cpu: + variable: "stats_system_cpu" + variable_value: "{{ proxysql_admin_stats_system_cpu 
}}" + stats_system_memory: + variable: "stats_system_memory" + variable_value: "{{ proxysql_admin_stats_system_memory }}" + web_enabled: + variable: "web_enabled" + variable_value: "{{ proxysql_admin_web_enabled | to_json }}" + web_port: + variable: "web_port" + variable_value: "{{ proxysql_admin_web_port }}" + +proxysql_mysql_variables: + autocommit_false_is_transaction: + variable: "autocommit_false_is_transaction" + variable_value: "{{ proxysql_mysql_autocommit_false_is_transaction | to_json }}" + autocommit_false_not_reusable: + variable: "autocommit_false_not_reusable" + variable_value: "{{ proxysql_mysql_autocommit_false_not_reusable | to_json }}" + client_found_rows: + variable: "client_found_rows" + variable_value: "{{ proxysql_mysql_client_found_rows | to_json }}" + commands_stats: + variable: "commands_stats" + variable_value: "{{ proxysql_mysql_commands_stats | to_json }}" + connect_retries_delay: + variable: "connect_retries_delay" + variable_value: "{{ proxysql_mysql_connect_retries_delay }}" + connect_retries_on_failure: + variable: "connect_retries_on_failure" + variable_value: "{{ proxysql_mysql_connect_retries_on_failure }}" + connect_timeout_server: + variable: "connect_timeout_server" + variable_value: "{{ proxysql_mysql_connect_timeout_server }}" + connect_timeout_server_max: + variable: "connect_timeout_server_max" + variable_value: "{{ proxysql_mysql_connect_timeout_server_max }}" + connection_delay_multiplex_ms: + variable: "connection_delay_multiplex_ms" + variable_value: "{{ proxysql_mysql_connection_delay_multiplex_ms }}" + connection_max_age_ms: + variable: "connection_max_age_ms" + variable_value: "{{ proxysql_mysql_connection_max_age_ms }}" + connpoll_reset_queue_length: + variable: "connpoll_reset_queue_length" + variable_value: "{{ proxysql_mysql_connpoll_reset_queue_length }}" + default_charset: + variable: "default_charset" + variable_value: "{{ proxysql_mysql_default_charset }}" + default_max_latency_ms: + variable: "default_max_latency_ms" + variable_value: "{{ proxysql_mysql_default_max_latency_ms }}" + default_query_delay: + variable: "default_query_delay" + variable_value: "{{ proxysql_mysql_default_query_delay }}" + default_query_timeout: + variable: "default_query_timeout" + variable_value: "{{ proxysql_mysql_default_query_timeout }}" + default_schema: + variable: "default_schema" + variable_value: "{{ proxysql_mysql_default_schema }}" + default_sql_mode: + variable: "default_sql_mode" + variable_value: "{{ proxysql_mysql_default_sql_mode }}" + default_time_zone: + variable: "default_time_zone" + variable_value: "{{ proxysql_mysql_default_time_zone }}" + eventslog_filename: + variable: "eventslog_filename" + variable_value: "{{ proxysql_mysql_eventslog_filename }}" + eventslog_filesize: + variable: "eventslog_filesize" + variable_value: "{{ proxysql_mysql_eventslog_filesize }}" + enforce_autocommit_on_reads: + variable: "enforce_autocommit_on_reads" + variable_value: "{{ proxysql_mysql_enforce_autocommit_on_reads | to_json}}" + forward_autocommit: + variable: "forward_autocommit" + variable_value: "{{ proxysql_mysql_forward_autocommit | to_json}}" + free_connections_pct: + variable: "free_connections_pct" + variable_value: "{{ proxysql_mysql_free_connections_pct }}" + hostgroup_manager_verbose: + variable: "hostgroup_manager_verbose" + variable_value: "{{ proxysql_mysql_hostgroup_manager_verbose }}" + init_connect: + variable: "init_connect" + variable_value: "{{ proxysql_mysql_init_connect }}" + kill_backend_connection_when_disconnect: + variable: 
"kill_backend_connection_when_disconnect" + variable_value: "{{ proxysql_mysql_kill_backend_connection_when_disconnect | to_json }}" + long_query_time: + variable: "long_query_time" + variable_value: "{{ proxysql_mysql_long_query_time }}" + max_allowed_packet: + variable: "max_allowed_packet" + variable_value: "{{ proxysql_mysql_max_allowed_packet }}" + max_connections: + variable: "max_connections" + variable_value: "{{ proxysql_mysql_max_connections }}" + max_stmts_cache: + variable: "max_stmts_cache" + variable_value: "{{ proxysql_mysql_max_stmts_cache }}" + max_stmts_per_connection: + variable: "max_stmts_per_connection" + variable_value: "{{ proxysql_mysql_max_stmts_per_connection }}" + max_transaction_time: + variable: "max_transaction_time" + variable_value: "{{ proxysql_mysql_max_transaction_time }}" + mirror_max_concurrency: + variable: "mirror_max_concurrency" + variable_value: "{{ proxysql_mysql_mirror_max_concurrency }}" + mirror_max_queue_length: + variable: "mirror_max_queue_length" + variable_value: "{{ proxysql_mysql_mirror_max_queue_length }}" + monitor_connect_interval: + variable: "monitor_connect_interval" + variable_value: "{{ proxysql_mysql_monitor_connect_interval }}" + monitor_connect_timeout: + variable: "monitor_connect_timeout" + variable_value: "{{ proxysql_mysql_monitor_connect_timeout }}" + monitor_enabled: + variable: "monitor_enabled" + variable_value: "{{ proxysql_mysql_monitor_enabled | to_json }}" + monitor_groupreplication_healthcheck_interval: + variable: "monitor_groupreplication_healthcheck_interval" + variable_value: "{{ proxysql_mysql_monitor_groupreplication_healthcheck_interval }}" + monitor_groupreplication_healthcheck_timeout: + variable: "monitor_groupreplication_healthcheck_timeout" + variable_value: "{{ proxysql_mysql_monitor_groupreplication_healthcheck_timeout }}" + monitor_history: + variable: "monitor_history" + variable_value: "{{ proxysql_mysql_monitor_history }}" + monitor_password: + variable: "monitor_password" + variable_value: "{{ proxysql_mysql_monitor_password }}" + monitor_ping_interval: + variable: "monitor_ping_interval" + variable_value: "{{ proxysql_mysql_monitor_ping_interval }}" + monitor_ping_max_failures: + variable: "monitor_ping_max_failures" + variable_value: "{{ proxysql_mysql_monitor_ping_max_failures }}" + monitor_ping_timeout: + variable: "monitor_ping_timeout" + variable_value: "{{ proxysql_mysql_monitor_ping_timeout }}" + monitor_query_interval: + variable: "monitor_query_interval" + variable_value: "{{ proxysql_mysql_monitor_query_interval }}" + monitor_query_timeout: + variable: "monitor_query_timeout" + variable_value: "{{ proxysql_mysql_monitor_query_timeout }}" + monitor_read_only_interval: + variable: "monitor_read_only_interval" + variable_value: "{{ proxysql_mysql_monitor_read_only_interval }}" + monitor_read_only_max_timeout_count: + variable: "monitor_read_only_max_timeout_count" + variable_value: "{{ proxysql_mysql_monitor_read_only_max_timeout_count }}" + monitor_read_only_timeout: + variable: "monitor_read_only_timeout" + variable_value: "{{ proxysql_mysql_monitor_read_only_timeout }}" + monitor_replication_lag_interval: + variable: "monitor_replication_lag_interval" + variable_value: "{{ proxysql_mysql_monitor_replication_lag_interval }}" + monitor_replication_lag_timeout: + variable: "monitor_replication_lag_timeout" + variable_value: "{{ proxysql_mysql_monitor_replication_lag_timeout }}" + monitor_replication_lag_use_percona_heartbeat: + variable: "monitor_replication_lag_use_percona_heartbeat" + 
variable_value: "{{ proxysql_mysql_monitor_replication_lag_use_percona_heartbeat }}" + monitor_slave_lag_when_null: + variable: "monitor_slave_lag_when_null" + variable_value: "{{ proxysql_mysql_monitor_slave_lag_when_null }}" + monitor_username: + variable: "monitor_username" + variable_value: "{{ proxysql_mysql_monitor_username }}" + monitor_wait_timeout: + variable: "monitor_wait_timeout" + variable_value: "{{ proxysql_mysql_monitor_wait_timeout | to_json }}" + monitor_writer_is_also_reader: + variable: "monitor_writer_is_also_reader" + variable_value: "{{ proxysql_mysql_monitor_writer_is_also_reader | to_json }}" + multiplexing: + variable: "multiplexing" + variable_value: "{{ proxysql_mysql_multiplexing | to_json }}" + mysql_interfaces: + variable: "interfaces" + variable_value: "{{ proxysql_mysql_bind_address }}:{{ proxysql_mysql_port }};{{ proxysql_mysql_socket }}" + ping_interval_server_msec: + variable: "ping_interval_server_msec" + variable_value: "{{ proxysql_mysql_ping_interval_server_msec }}" + ping_timeout_server: + variable: "ping_timeout_server" + variable_value: "{{ proxysql_mysql_ping_timeout_server }}" + poll_timeout: + variable: "poll_timeout" + variable_value: "{{ proxysql_mysql_poll_timeout }}" + poll_timeout_on_failure: + variable: "poll_timeout_on_failure" + variable_value: "{{ proxysql_mysql_poll_timeout_on_failure }}" + query_cache_size_mb: + variable: "query_cache_size_MB" + variable_value: "{{ proxysql_mysql_query_cache_size_mb }}" + query_digests: + variable: "query_digests" + variable_value: "{{ proxysql_mysql_query_digests | to_json }}" + query_digests_lowercase: + variable: "query_digests_lowercase" + variable_value: "{{ proxysql_mysql_query_digests_lowercase | to_json }}" + query_digests_max_digest_length: + variable: "query_digests_max_digest_length" + variable_value: "{{ proxysql_mysql_query_digests_max_digest_length }}" + query_digests_max_query_length: + variable: "query_digests_max_query_length" + variable_value: "{{ proxysql_mysql_query_digests_max_query_length }}" + query_processor_iterations: + variable: "query_processor_iterations" + variable_value: "{{ proxysql_mysql_query_processor_iterations }}" + query_processor_regex: + variable: "query_processor_regex" + variable_value: "{{ proxysql_mysql_query_processor_regex }}" + query_retries_on_failure: + variable: "query_retries_on_failure" + variable_value: "{{ proxysql_mysql_query_retries_on_failure }}" + server_capabilities: + variable: "server_capabilities" + variable_value: "{{ proxysql_mysql_server_capabilities }}" + server_version: + variable: "server_version" + variable_value: "{{ proxysql_mysql_server_version }}" + session_idle_ms: + variable: "session_idle_ms" + variable_value: "{{ proxysql_mysql_session_idle_ms }}" + session_idle_show_processlist: + variable: "session_idle_show_processlist" + variable_value: "{{ proxysql_mysql_session_idle_show_processlist | to_json }}" + sessions_sort: + variable: "sessions_sort" + variable_value: "{{ proxysql_mysql_sessions_sort | to_json }}" + shun_on_failures: + variable: "shun_on_failures" + variable_value: "{{ proxysql_mysql_shun_on_failures }}" + shun_recovery_time_sec: + variable: "shun_recovery_time_sec" + variable_value: "{{ proxysql_mysql_shun_recovery_time_sec }}" + ssl_p2s_ca: + variable: "ssl_p2s_ca" + variable_value: "{{ proxysql_mysql_ssl_p2s_ca }}" + ssl_p2s_cert: + variable: "ssl_p2s_cert" + variable_value: "{{ proxysql_mysql_ssl_p2s_cert }}" + ssl_p2s_cipher: + variable: "ssl_p2s_cipher" + variable_value: "{{ proxysql_mysql_ssl_p2s_cipher 
}}" + ssl_p2s_key: + variable: "ssl_p2s_key" + variable_value: "{{ proxysql_mysql_ssl_p2s_key }}" + stats_time_backend_query: + variable: "stats_time_backend_query" + variable_value: "{{ proxysql_mysql_stats_time_backend_query | to_json }}" + stats_time_query_processor: + variable: "stats_time_query_processor" + variable_value: "{{ proxysql_mysql_stats_time_query_processor | to_json }}" + threshold_query_length: + variable: "threshold_query_length" + variable_value: "{{ proxysql_mysql_threshold_query_length }}" + threshold_resultset_size: + variable: "threshold_resultset_size" + variable_value: "{{ proxysql_mysql_threshold_resultset_size }}" + throttle_connections_per_sec_to_hostgroup: + variable: "throttle_connections_per_sec_to_hostgroup" + variable_value: "{{ proxysql_mysql_throttle_connections_per_sec_to_hostgroup }}" + throttle_max_bytes_per_second_to_client: + variable: "throttle_max_bytes_per_second_to_client" + variable_value: "{{ proxysql_mysql_throttle_max_bytes_per_second_to_client }}" + throttle_ratio_server_to_client: + variable: "throttle_ratio_server_to_client" + variable_value: "{{ proxysql_mysql_throttle_ratio_server_to_client }}" + verbose_query_error: + variable: "verbose_query_error" + variable_value: "{{ proxysql_mysql_verbose_query_error | to_json }}" + wait_timeout: + variable: "wait_timeout" + variable_value: "{{ proxysql_mysql_mysql_wait_timeout }}" + +proxysql_mysql_options: + mysql_threads: + variable: "threads" + variable_value: "{{ proxysql_mysql_threads }}" + mysql_stacksize: + variable: "stacksize" + variable_value: "{{ proxysql_mysql_stacksize }}" diff --git a/tests/integration/targets/setup_proxysql/defaults/main.yml b/tests/integration/targets/setup_proxysql/defaults/main.yml new file mode 100644 index 0000000..d994a84 --- /dev/null +++ b/tests/integration/targets/setup_proxysql/defaults/main.yml @@ -0,0 +1,5 @@ +--- +proxysql_download_src: https://github.com/sysown/proxysql/releases/download +proxysql_version: 2.0.10 + +proxysql_mysql_client_version: 5.7 diff --git a/tests/integration/targets/setup_proxysql/tasks/config.yml b/tests/integration/targets/setup_proxysql/tasks/config.yml new file mode 100644 index 0000000..a66a01a --- /dev/null +++ b/tests/integration/targets/setup_proxysql/tasks/config.yml @@ -0,0 +1,12 @@ +--- +- name: proxysql | config | enable and start proxysql + service: + name: proxysql + state: started + enabled: true + +- name: proxysql | config | wait for proxysql + wait_for: + port: 6032 + state: started + timeout: 30 diff --git a/tests/integration/targets/setup_proxysql/tasks/install.yml b/tests/integration/targets/setup_proxysql/tasks/install.yml new file mode 100644 index 0000000..5ceb67a --- /dev/null +++ b/tests/integration/targets/setup_proxysql/tasks/install.yml @@ -0,0 +1,28 @@ +--- +- name: proxysql | install | add apt signing key for percona + apt_key: + keyserver: keyserver.ubuntu.com + id: 4D1BB29D63D98E422B2113B19334A25F8507EFA5 + state: present + +- name: proxysql | install | add percona repositories + apt_repository: + repo: "{{ item }}" + state: present + loop: "{{ proxysql_percona_mysql_repos }}" + +- name: proxysql | install | install proxysql release + apt: + deb: "{{ proxysql_release }}" + state: present + +- name: proxysql | install | install packages required by proxysql + apt: + name: "{{ proxysql_percona_mysql_packages }}" + state: present + environment: + DEBIAN_FRONTEND: noninteractive + +- name: proxysql | install | install python packages + pip: + name: "{{ proxysql_python_packages }}" diff --git 
a/tests/integration/targets/setup_proxysql/tasks/main.yml b/tests/integration/targets/setup_proxysql/tasks/main.yml new file mode 100644 index 0000000..66e9a65 --- /dev/null +++ b/tests/integration/targets/setup_proxysql/tasks/main.yml @@ -0,0 +1,3 @@ +--- +- import_tasks: install.yml +- import_tasks: config.yml diff --git a/tests/integration/targets/setup_proxysql/vars/main.yml b/tests/integration/targets/setup_proxysql/vars/main.yml new file mode 100644 index 0000000..443d789 --- /dev/null +++ b/tests/integration/targets/setup_proxysql/vars/main.yml @@ -0,0 +1,13 @@ +--- +proxysql_release: "{{ proxysql_download_src }}/v{{ proxysql_version }}/proxysql_{{ proxysql_version }}-{{ ansible_lsb.id | lower }}{{ ansible_lsb.major_release }}_amd64.deb" + +proxysql_percona_mysql_repos: + - deb http://repo.percona.com/apt {{ ansible_lsb.codename }} main + - deb-src http://repo.percona.com/apt {{ ansible_lsb.codename }} main + +proxysql_percona_mysql_packages: + - percona-server-client-{{ proxysql_mysql_client_version }} + # - python-mysqldb + +proxysql_python_packages: + - pymysql diff --git a/tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml b/tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml new file mode 100644 index 0000000..f5a4be5 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/defaults/main.yml @@ -0,0 +1,8 @@ +--- +test_host: mysql01 + +test_proxysql_backend_servers_check_mode: false +test_proxysql_backend_servers_in_memory_only: false +test_proxysql_backend_servers_with_delayed_persist: false +test_proxysql_backend_servers_check_idempotence: false +test_proxysql_backend_servers_cleanup_after_test: true diff --git a/tests/integration/targets/test_proxysql_backend_servers/meta/main.yml b/tests/integration/targets/test_proxysql_backend_servers/meta/main.yml new file mode 100644 index 0000000..2023b8d --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml new file mode 100644 index 0000000..4cd1d09 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/base_test.yml @@ -0,0 +1,57 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_server', 'cleanup_test_servers') }}.yml" + when: not test_proxysql_backend_servers_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test backend mysql server" + proxysql_backend_servers: + login_user: admin + login_password: admin + hostname: "{{ test_host }}" + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_backend_servers_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_backend_servers_in_memory_only }}" + check_mode: "{{ test_proxysql_backend_servers_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the mysql servers config from memory to 
disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL SERVERS + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the mysql servers config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL SERVERS + direction: TO + config_layer: RUNTIME + + when: test_proxysql_backend_servers_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test backend mysql server exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT hostname FROM mysql_servers where hostname = '{{ test_host }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test backend mysql server exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT hostname FROM disk.mysql_servers where hostname = '{{ test_host }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test backend mysql server exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT hostname FROM runtime_mysql_servers where hostname = '{{ test_host }}'" + register: runtime_result diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml new file mode 100644 index 0000000..070e15b --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/cleanup_test_servers.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no hosts are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_servers" + + - name: "{{ role_name }} | {{ current_test }} | ensure no hosts are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no hosts are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml new file mode 100644 index 0000000..bef6bbe --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create backend server using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_backend_servers_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete backend server using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_backend_servers_check_mode: true + +- name: "{{ role_name }} | test_create_backend_server | test create backend server" + import_tasks: test_create_backend_server.yml + vars: + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_create_backend_server | test idempotence of create backend server" + import_tasks: test_create_backend_server.yml + vars: + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_delete_backend_server | test delete backend server" + import_tasks: test_delete_backend_server.yml + vars: + 
test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_backend_server | test idempotence of delete backend server" + import_tasks: test_delete_backend_server.yml + vars: + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_create_backend_server_in_memory_only | test create backend server in memory" + import_tasks: test_create_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_create_backend_server_in_memory_only | test idempotence of create backend server in memory" + import_tasks: test_create_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_delete_backend_server_in_memory_only | test delete backend server in memory" + import_tasks: test_delete_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_backend_server_in_memory_only | test idempotence of delete backend server in memory" + import_tasks: test_delete_backend_server_in_memory_only.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_create_backend_server_with_delayed_persist | test create backend server with delayed save to disk/load to runtime" + import_tasks: test_create_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_create_backend_server_with_delayed_persist | test idempotence of create backend server with delayed save to disk/load to runtime" + import_tasks: test_create_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_check_idempotence: true + +- name: "{{ role_name }} | test_delete_backend_server_with_delayed_persist | test delete backend server with delayed save to disk/load to runtime" + import_tasks: test_delete_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_backend_server_with_delayed_persist | test idempotence of delete backend server with delayed save to disk/load to runtime" + import_tasks: test_delete_backend_server_with_delayed_persist.yml + vars: + test_proxysql_backend_servers_in_memory_only: true + test_proxysql_backend_servers_with_delayed_persist: true + test_proxysql_backend_servers_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml new file mode 100644 index 0000000..64d9dac --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/setup_test_server.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ 
current_test }} | ensure test backend mysql server is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_servers (hostname) VALUES ('{{ test_host }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test backend mysql server is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml new file mode 100644 index 0000000..0cb5ae1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml new file mode 100644 index 0000000..fc19995 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_backend_server | set current test" + set_fact: + current_test: test_create_backend_server + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml new file mode 100644 index 0000000..fe6148b --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_backend_server_in_memory_only | set current test" + set_fact: + current_test: test_create_backend_server_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change 
in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create server didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml new file mode 100644 index 0000000..c4c8956 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_backend_server_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_backend_server_with_delayed_persist | set current test" + set_fact: + current_test: test_create_backend_server_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create server did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml new file mode 100644 index 0000000..2c2dfa9 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create server in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create server in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create server in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create server in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: 
cleanup_test_servers.yml diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml new file mode 100644 index 0000000..17305ed --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_backend_server | set current test" + set_fact: + current_test: test_delete_backend_server + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml new file mode 100644 index 0000000..b7882fb --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_backend_server_in_memory_only | set current test" + set_fact: + current_test: test_delete_backend_server_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml new file mode 100644 index 0000000..516a0a4 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_backend_server_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_backend_server_with_delayed_persist | set current test" + 
set_fact: + current_test: test_delete_backend_server_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server reported a change" + assert: + that: + - "status is {{ test_proxysql_backend_servers_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml + when: test_proxysql_backend_servers_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml new file mode 100644 index 0000000..17aac72 --- /dev/null +++ b/tests/integration/targets/test_proxysql_backend_servers/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete server in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_host }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete server in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_host }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_servers.yml diff --git a/tests/integration/targets/test_proxysql_global_variables/defaults/main.yml b/tests/integration/targets/test_proxysql_global_variables/defaults/main.yml new file mode 100644 index 0000000..d653938 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/defaults/main.yml @@ -0,0 +1,8 @@ +--- +test_variable: mysql-max_connections + +test_proxysql_global_variables_check_mode: false +test_proxysql_global_variables_in_memory_only: false +test_proxysql_global_variables_with_delayed_persist: false +test_proxysql_global_variables_check_idempotence: false +test_proxysql_global_variables_cleanup_after_test: true diff --git a/tests/integration/targets/test_proxysql_global_variables/meta/main.yml b/tests/integration/targets/test_proxysql_global_variables/meta/main.yml new file mode 100644 index 0000000..2023b8d --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml 
b/tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml new file mode 100644 index 0000000..7676ddf --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/base_test.yml @@ -0,0 +1,54 @@ +--- +### prepare + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: setup_global_variables.yml + when: not test_proxysql_global_variables_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | update global variable" + proxysql_global_variables: + login_user: admin + login_password: admin + variable: "{{ test_variable }}" + value: "{{ updated_variable_value }}" + save_to_disk: "{{ not test_proxysql_global_variables_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_global_variables_in_memory_only }}" + check_mode: "{{ test_proxysql_global_variables_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save global variables from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL VARIABLES + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load global variables from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL VARIABLES + direction: TO + config_layer: RUNTIME + + when: test_proxysql_global_variables_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if updated global variable value exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM global_variables where variable_name='{{ test_variable }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if updated global variable value exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM disk.global_variables where variable_name='{{ test_variable }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if updated global variable value exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM runtime_global_variables where variable_name='{{ test_variable }}'" + register: runtime_result diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml new file mode 100644 index 0000000..68241ac --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/cleanup_global_variables.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure variable value set to original value" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"UPDATE global_variables SET variable_value={{ original_variable_value }} WHERE variable_name='{{ test_variable }}'" + + - name: "{{ role_name }} | {{ current_test }} | ensure original variable value is saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL VARIABLES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure original variable value is loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD 
MYSQL VARIABLES TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/main.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/main.yml new file mode 100644 index 0000000..d598ce4 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/main.yml @@ -0,0 +1,48 @@ +--- +- name: "{{ role_name }} | setvars | populate base variables for tests" + import_tasks: setvars.yml + +### tests + +- name: "{{ role_name }} | test_update_variable_value_using_check_mode | test updating global variable using check mode" + import_tasks: test_update_variable_value_using_check_mode.yml + vars: + test_proxysql_global_variables_check_mode: true + +- name: "{{ role_name }} | test_update_variable_value | test updating global variable value" + import_tasks: test_update_variable_value.yml + vars: + test_proxysql_global_variables_cleanup_after_test: false +- name: "{{ role_name }} | test_update_variable_value | test idempotence of updating global variable value" + import_tasks: test_update_variable_value.yml + vars: + test_proxysql_global_variables_check_idempotence: true + +- name: "{{ role_name }} | test_update_variable_value_in_memory_only | test updating global variable value in memory" + import_tasks: test_update_variable_value_in_memory_only.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_cleanup_after_test: false +- name: "{{ role_name }} | test_update_variable_value_in_memory_only | test idempotence of updating global variable value in memory" + import_tasks: test_update_variable_value_in_memory_only.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_check_idempotence: true + +- name: "{{ role_name }} | test_update_variable_value_with_delayed_persist | test updating global variable value with delayed save to disk/load to runtime" + import_tasks: test_update_variable_value_with_delayed_persist.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_with_delayed_persist: true + test_proxysql_global_variables_cleanup_after_test: false +- name: "{{ role_name }} | test_update_variable_value_with_delayed_persist | test idempotence of updating global variable value with delayed save to disk/load to runtime" + import_tasks: test_update_variable_value_with_delayed_persist.yml + vars: + test_proxysql_global_variables_in_memory_only: true + test_proxysql_global_variables_with_delayed_persist: true + test_proxysql_global_variables_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml new file mode 100644 index 0000000..a5b0a8c --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/setup_global_variables.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test variable value is reset to the original value when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test variable value is reset to the original value in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"UPDATE global_variables SET variable_value={{ original_variable_value }} WHERE variable_name='{{ test_variable }}'" + + - name: "{{ role_name }} | {{ current_test }} | ensure original variable value is saved on disk" + shell: mysql
-uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL VARIABLES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure original variable value is loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL VARIABLES TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml new file mode 100644 index 0000000..6499f12 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/setvars.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | setvars | get original variable value" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT variable_value FROM global_variables where variable_name='{{ test_variable }}'" + register: result + +- name: "{{ role_name }} | setvars | populate original_variable_value variable" + set_fact: + original_variable_value: "{{ result.stdout }}" + +- name: "{{ role_name }} | setvars | populate updated_variable_value variable" + set_fact: + updated_variable_value: "{{ result.stdout|int * 2 }}" diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml new file mode 100644 index 0000000..0cb5ae1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml new file mode 100644 index 0000000..ff5bc81 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_update_variable_value | set current test" + set_fact: + current_test: test_update_variable_value + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change" + assert: + that: + - "status is {{ test_proxysql_global_variables_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change in memory" + assert: + that: "memory_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change on disk" + assert: + that: "disk_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change to runtime" + assert: + that: "runtime_result.stdout == '{{ updated_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml + when: test_proxysql_global_variables_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml new file mode 100644 index 0000000..11297ec --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name:
"{{ role_name }} | test_update_variable_value_in_memory_only | set current test" + set_fact: + current_test: test_update_variable_value_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change" + assert: + that: + - "status is {{ test_proxysql_global_variables_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change in memory" + assert: + that: "memory_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value didn't make a change on disk" + assert: + that: "disk_result.stdout == '{{ original_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value didn't make a change to runtime" + assert: + that: "runtime_result.stdout == '{{ original_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml + when: test_proxysql_global_variables_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml new file mode 100644 index 0000000..b98c2d2 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_update_variable_value_using_check_mode | set current test" + set_fact: + current_test: test_update_variable_value_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating variable value reported a change in check mode" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create updating variable value didn't make a change in memory in check mode" + assert: + that: "memory_result.stdout == '{{ original_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm create updating variable value didn't make a change on disk in check mode" + assert: + that: "disk_result.stdout == '{{ original_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm create updating variable value didn't make a change to runtime in check mode" + assert: + that: "runtime_result.stdout == '{{ original_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml diff --git a/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml new file mode 100644 index 0000000..d6bb424 --- /dev/null +++ b/tests/integration/targets/test_proxysql_global_variables/tasks/test_update_variable_value_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_updated_variable_value_with_delayed_persist | set current test" + set_fact: + current_test: test_updated_variable_value_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if updating 
variable value reported a change" + assert: + that: + - "status is {{ test_proxysql_global_variables_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change in memory" + assert: + that: "memory_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change on disk" + assert: + that: "disk_result.stdout == '{{ updated_variable_value }}'" + +- name: "{{ role_name }} | {{ current_test }} | confirm updating variable value did make a change to runtime" + assert: + that: "runtime_result.stdout == '{{ updated_variable_value }}'" + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_global_variables.yml + when: test_proxysql_global_variables_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml b/tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml new file mode 100644 index 0000000..bd4f787 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/defaults/main.yml @@ -0,0 +1,8 @@ +--- +test_user: productiondba + +test_proxysql_mysql_users_check_mode: false +test_proxysql_mysql_users_in_memory_only: false +test_proxysql_mysql_users_with_delayed_persist: false +test_proxysql_mysql_users_check_idempotence: false +test_proxysql_mysql_users_cleanup_after_test: true diff --git a/tests/integration/targets/test_proxysql_mysql_users/meta/main.yml b/tests/integration/targets/test_proxysql_mysql_users/meta/main.yml new file mode 100644 index 0000000..2023b8d --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml new file mode 100644 index 0000000..8ac175b --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/base_test.yml @@ -0,0 +1,57 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_user', 'cleanup_test_users') }}.yml" + when: not test_proxysql_mysql_users_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test mysql user" + proxysql_mysql_users: + login_user: admin + login_password: admin + username: '{{ test_user }}' + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_mysql_users_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_mysql_users_in_memory_only }}" + check_mode: "{{ test_proxysql_mysql_users_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the mysql users config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL USERS + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the mysql users config
from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL USERS + direction: TO + config_layer: RUNTIME + + when: test_proxysql_mysql_users_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test mysql user exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username FROM mysql_users where username = '{{ test_user }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test mysql user exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username FROM disk.mysql_users where username = '{{ test_user }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test mysql user exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username FROM runtime_mysql_users where username = '{{ test_user }}'" + register: runtime_result diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml new file mode 100644 index 0000000..865f5ef --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/cleanup_test_users.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no users are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_users" + + - name: "{{ role_name }} | {{ current_test }} | ensure no users are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL USERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no users are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL USERS TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml new file mode 100644 index 0000000..d4d9320 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create mysql user using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_mysql_users_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete mysql user using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_mysql_users_check_mode: true + +- name: "{{ role_name }} | test_create_mysql_user | test create mysql user" + import_tasks: test_create_mysql_user.yml + vars: + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_create_mysql_user | test idempotence of create mysql user" + import_tasks: test_create_mysql_user.yml + vars: + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_delete_mysql_user | test delete mysql user" + import_tasks: test_delete_mysql_user.yml + vars: + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_mysql_user | test idempotence of delete mysql user" + import_tasks: test_delete_mysql_user.yml + vars: + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_create_mysql_user_in_memory_only | test create mysql user in memory" + import_tasks: 
test_create_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_create_mysql_user_in_memory_only | test idempotence of create mysql user in memory" + import_tasks: test_create_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_delete_mysql_user_in_memory_only | test delete mysql user in memory" + import_tasks: test_delete_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_mysql_user_in_memory_only | test idempotence of delete mysql user in memory" + import_tasks: test_delete_mysql_user_in_memory_only.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_create_mysql_user_with_delayed_persist | test create mysql user with delayed save to disk/load to runtime" + import_tasks: test_create_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_create_mysql_user_with_delayed_persist | test idempotence of create mysql user with delayed save to disk/load to runtime" + import_tasks: test_create_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_check_idempotence: true + +- name: "{{ role_name }} | test_delete_mysql_user_with_delayed_persist | test delete mysql user with delayed save to disk/load to runtime" + import_tasks: test_delete_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_mysql_user_with_delayed_persist | test idempotence of delete mysql user with delayed save to disk/load to runtime" + import_tasks: test_delete_mysql_user_with_delayed_persist.yml + vars: + test_proxysql_mysql_users_in_memory_only: true + test_proxysql_mysql_users_with_delayed_persist: true + test_proxysql_mysql_users_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml new file mode 100644 index 0000000..9ad43c9 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/setup_test_user.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_users (username) VALUES ('{{ test_user }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL USERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test mysql user is created in runtime" + 
shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL USERS TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml new file mode 100644 index 0000000..0cb5ae1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml new file mode 100644 index 0000000..81616b4 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_mysql_user | set current test" + set_fact: + current_test: test_create_mysql_user + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml new file mode 100644 index 0000000..6abd555 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_mysql_user_in_memory_only | set current test" + set_fact: + current_test: test_create_mysql_user_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create user didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml 
b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml new file mode 100644 index 0000000..5a20282 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_mysql_user_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_mysql_user_with_delayed_persist | set current test" + set_fact: + current_test: test_create_mysql_user_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create user did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml new file mode 100644 index 0000000..314d630 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create user in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create user in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create user in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create user in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml new file mode 100644 index 0000000..4882796 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_mysql_user | set current test" + set_fact: + current_test: test_delete_mysql_user + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ 
current_test }} | confirm delete user did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml new file mode 100644 index 0000000..7967bf0 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_mysql_user_in_memory_only | set current test" + set_fact: + current_test: test_delete_mysql_user_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml new file mode 100644 index 0000000..4239b6b --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_mysql_user_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_mysql_user_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_mysql_user_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user reported a change" + assert: + that: + - "status is {{ test_proxysql_mysql_users_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" +
import_tasks: cleanup_test_users.yml + when: test_proxysql_mysql_users_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml new file mode 100644 index 0000000..a200154 --- /dev/null +++ b/tests/integration/targets/test_proxysql_mysql_users/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete user in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete user in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }}\n{{ test_user }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_users.yml diff --git a/tests/integration/targets/test_proxysql_query_rules/defaults/main.yml b/tests/integration/targets/test_proxysql_query_rules/defaults/main.yml new file mode 100644 index 0000000..ffcd581 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/defaults/main.yml @@ -0,0 +1,12 @@ +--- +test_user: 'guest_ro' +test_match_pattern: "^SELECT.*" +test_destination_hostgroup: 1 +test_active: 1 +test_retries: 3 + +test_proxysql_query_rules_check_mode: false +test_proxysql_query_rules_in_memory_only: false +test_proxysql_query_rules_with_delayed_persist: false +test_proxysql_query_rules_check_idempotence: false +test_proxysql_query_rules_cleanup_after_test: true diff --git a/tests/integration/targets/test_proxysql_query_rules/meta/main.yml b/tests/integration/targets/test_proxysql_query_rules/meta/main.yml new file mode 100644 index 0000000..2023b8d --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml new file mode 100644 index 0000000..af830bc --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/base_test.yml @@ -0,0 +1,61 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_query_rule', 'cleanup_test_query_rules') }}.yml" + when: not test_proxysql_query_rules_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test query rule" + proxysql_query_rules: + login_user: admin + login_password: admin + username: '{{ test_user }}' + match_pattern: '{{ test_match_pattern }}' + 
destination_hostgroup: '{{ test_destination_hostgroup }}' + active: '{{ test_active }}' + retries: '{{ test_retries }}' + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_query_rules_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_query_rules_in_memory_only }}" + check_mode: "{{ test_proxysql_query_rules_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the query rules config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL QUERY RULES + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the query rules config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL QUERY RULES + direction: TO + config_layer: RUNTIME + + when: test_proxysql_query_rules_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test query rule exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username || ',' || match_pattern || ',' || destination_hostgroup || ',' || active || ',' || retries FROM mysql_query_rules where username = '{{ test_user }}' and match_pattern = '{{ test_match_pattern }}' and destination_hostgroup = '{{ test_destination_hostgroup }}' and active = '{{ test_active }}' and retries = '{{ test_retries }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test query rule exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username || ',' || match_pattern || ',' || destination_hostgroup || ',' || active || ',' || retries FROM disk.mysql_query_rules where username = '{{ test_user }}' and match_pattern = '{{ test_match_pattern }}' and destination_hostgroup = '{{ test_destination_hostgroup }}' and active = '{{ test_active }}' and retries = '{{ test_retries }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test query rule exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT username || ',' || match_pattern || ',' || destination_hostgroup || ',' || active || ',' || retries FROM runtime_mysql_query_rules where username = '{{ test_user }}' and match_pattern = '{{ test_match_pattern }}' and destination_hostgroup = '{{ test_destination_hostgroup }}' and active = '{{ test_active }}' and retries = '{{ test_retries }}'" + register: runtime_result diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml new file mode 100644 index 0000000..3f233b7 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/cleanup_test_query_rules.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no query rules are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_query_rules" + + - name: "{{ role_name }} | {{ current_test }} | ensure no query rules are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL QUERY RULES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no
query rules are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL QUERY RULES TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/main.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/main.yml new file mode 100644 index 0000000..bd18b83 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create query rule using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_query_rules_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete query rule using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_query_rules_check_mode: true + +- name: "{{ role_name }} | test_create_query_rule | test create query rule" + import_tasks: test_create_query_rule.yml + vars: + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_create_query_rule | test idempotence of create query rule" + import_tasks: test_create_query_rule.yml + vars: + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_delete_query_rule | test delete query rule" + import_tasks: test_delete_query_rule.yml + vars: + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_query_rule | test idempotence of delete query rule" + import_tasks: test_delete_query_rule.yml + vars: + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_create_query_rule_in_memory_only | test create query rule in memory" + import_tasks: test_create_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_create_query_rule_in_memory_only | test idempotence of create query rule in memory" + import_tasks: test_create_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_delete_query_rule_in_memory_only | test delete query rule in memory" + import_tasks: test_delete_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_query_rule_in_memory_only | test idempotence of delete query rule in memory" + import_tasks: test_delete_query_rule_in_memory_only.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_create_query_rule_with_delayed_persist | test create query rule with delayed save to disk/load to runtime" + import_tasks: test_create_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_create_query_rule_with_delayed_persist | test idempotence of create query rule with delayed save to disk/load to runtime" + import_tasks: test_create_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_check_idempotence: true + +- name: "{{ role_name }} | test_delete_query_rule_with_delayed_persist | test 
delete query rule with delayed save to disk/load to runtime" + import_tasks: test_delete_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_query_rule_with_delayed_persist | test idempotence of delete query rule with delayed save to disk/load to runtime" + import_tasks: test_delete_query_rule_with_delayed_persist.yml + vars: + test_proxysql_query_rules_in_memory_only: true + test_proxysql_query_rules_with_delayed_persist: true + test_proxysql_query_rules_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml new file mode 100644 index 0000000..0af8365 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/setup_test_query_rule.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_query_rules (username, match_pattern, destination_hostgroup, active, retries) VALUES ('{{ test_user }}', '{{ test_match_pattern }}', '{{ test_destination_hostgroup }}', '{{ test_active }}', '{{ test_retries }}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL QUERY RULES TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test query rule is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL QUERY RULES TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml new file mode 100644 index 0000000..0cb5ae1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml new file mode 100644 index 0000000..f6fcfe4 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_query_rule | set current test" + set_fact: + current_test: test_create_query_rule + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change on disk" + assert: + that:
disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml new file mode 100644 index 0000000..4f0e5f6 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_query_rule_in_memory_only | set current test" + set_fact: + current_test: test_create_query_rule_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml new file mode 100644 index 0000000..2914ebc --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_query_rule_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_query_rule_with_delayed_persist | set current test" + set_fact: + current_test: test_create_query_rule_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ 
test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml new file mode 100644 index 0000000..6e217bc --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create query rule in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create query rule in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml new file mode 100644 index 0000000..c17cb99 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_query_rule | set current test" + set_fact: + current_test: test_delete_query_rule + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml new 
file mode 100644 index 0000000..a2e7b6a --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_query_rule_in_memory_only | set current test" + set_fact: + current_test: test_delete_query_rule_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml new file mode 100644 index 0000000..5ef8c47 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_query_rule_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_query_rule_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_query_rule_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule reported a change" + assert: + that: + - "status is {{ test_proxysql_query_rules_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml + when: test_proxysql_query_rules_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml new file mode 100644 index 0000000..4efbf44 --- /dev/null +++ b/tests/integration/targets/test_proxysql_query_rules/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml
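Each scenario file in these targets opens with the same two-step pattern visible just above: a set_fact task records which scenario is running, and base_test.yml is then pulled in with include_tasks, which is resolved dynamically at runtime, after current_test has been set. The cleanup at the end of most scenario files goes the other way and uses the static import_tasks, whose when: guard is stamped onto each imported task at parse time (the check-mode files always clean up, so they omit the guard). A distilled sketch of the pattern, with names taken from the query-rules target:

    - name: record which scenario is running
      set_fact:
        current_test: test_delete_using_check_mode

    # dynamic include: evaluated at runtime, once current_test exists
    - include_tasks: base_test.yml

    # static import: the when condition is applied to each imported task
    - import_tasks: cleanup_test_query_rules.yml
      when: test_proxysql_query_rules_cleanup_after_test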
+ +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete query rule in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete query rule in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_user }},{{ test_match_pattern }},{{ test_destination_hostgroup }},{{ test_active }},{{ test_retries }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_query_rules.yml diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml new file mode 100644 index 0000000..4e381d4 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/defaults/main.yml @@ -0,0 +1,9 @@ +--- +test_writer_hostgroup: 1 +test_reader_hostgroup: 2 + +test_proxysql_replication_hostgroups_check_mode: false +test_proxysql_replication_hostgroups_in_memory_only: false +test_proxysql_replication_hostgroups_with_delayed_persist: false +test_proxysql_replication_hostgroups_check_idempotence: false +test_proxysql_replication_hostgroups_cleanup_after_test: true diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml new file mode 100644 index 0000000..2023b8d --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml new file mode 100644 index 0000000..466a7c6 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/base_test.yml @@ -0,0 +1,58 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_replication_hostgroups', 'cleanup_test_replication_hostgroups') }}.yml" + when: not test_proxysql_replication_hostgroups_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test mysql replication hostgroup" + proxysql_replication_hostgroups: + login_user: admin + login_password: admin + writer_hostgroup: '{{ test_writer_hostgroup }}' + reader_hostgroup: '{{ test_reader_hostgroup }}' + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_replication_hostgroups_in_memory_only }}" + load_to_runtime: 
"{{ not test_proxysql_replication_hostgroups_in_memory_only }}" + check_mode: "{{ test_proxysql_replication_hostgroups_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the replication hostgroups config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: MYSQL SERVERS + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the replication hostgroups config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: MYSQL SERVERS + direction: TO + config_layer: RUNTIME + + when: test_proxysql_replication_hostgroups_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test replication hostgroups exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT writer_hostgroup || ',' || reader_hostgroup FROM mysql_replication_hostgroups where writer_hostgroup = '{{ test_writer_hostgroup }}' and reader_hostgroup = '{{ test_reader_hostgroup }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test replication hostgroups exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT writer_hostgroup || ',' || reader_hostgroup FROM disk.mysql_replication_hostgroups where writer_hostgroup = '{{ test_writer_hostgroup }}' and reader_hostgroup = '{{ test_reader_hostgroup }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test replication hostgroups exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT writer_hostgroup || ',' || reader_hostgroup FROM runtime_mysql_replication_hostgroups where writer_hostgroup = '{{ test_writer_hostgroup }}' and reader_hostgroup = '{{ test_reader_hostgroup }}'" + register: runtime_result diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml new file mode 100644 index 0000000..d32262a --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/cleanup_test_replication_hostgroups.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no replication hostgroups are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM mysql_replication_hostgroups" + + - name: "{{ role_name }} | {{ current_test }} | ensure no replication hostgroups are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no replication hostgroups are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml new file mode 100644 index 0000000..de5f813 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test 
create replication hostgroups using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_replication_hostgroups_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete replication hostgroups using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_replication_hostgroups_check_mode: true + +- name: "{{ role_name }} | test_create_replication_hostgroups | test create replication hostgroups" + import_tasks: test_create_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_create_replication_hostgroups | test idempotence of create replication hostgroups" + import_tasks: test_create_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_delete_replication_hostgroups | test delete replication hostgroups" + import_tasks: test_delete_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_replication_hostgroups | test idempotence of delete replication hostgroups" + import_tasks: test_delete_replication_hostgroups.yml + vars: + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_create_replication_hostgroups_in_memory_only | test create replication hostgroups in memory" + import_tasks: test_create_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_create_replication_hostgroups_in_memory_only | test idempotence of create replication hostgroups in memory" + import_tasks: test_create_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_delete_replication_hostgroups_in_memory_only | test delete replication hostgroups in memory" + import_tasks: test_delete_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_replication_hostgroups_in_memory_only | test idempotence of delete replication hostgroups in memory" + import_tasks: test_delete_replication_hostgroups_in_memory_only.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_create_replication_hostgroups_with_delayed_persist | test create replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_create_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_create_replication_hostgroups_with_delayed_persist | test idempotence of create replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_create_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + 
test_proxysql_replication_hostgroups_check_idempotence: true + +- name: "{{ role_name }} | test_delete_replication_hostgroups_with_delayed_persist | test delete replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_delete_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_replication_hostgroups_with_delayed_persist | test idempotence of delete replication hostgroups with delayed save to disk/load to runtime" + import_tasks: test_delete_replication_hostgroups_with_delayed_persist.yml + vars: + test_proxysql_replication_hostgroups_in_memory_only: true + test_proxysql_replication_hostgroups_with_delayed_persist: true + test_proxysql_replication_hostgroups_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml new file mode 100644 index 0000000..03f759d --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/setup_test_replication_hostgroups.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO mysql_replication_hostgroups (writer_hostgroup, reader_hostgroup) VALUES ('{{ test_writer_hostgroup }}', '{{ test_reader_hostgroup}}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE MYSQL SERVERS TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test replication hostgroups is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD MYSQL SERVERS TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml new file mode 100644 index 0000000..0cb5ae1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml new file mode 100644 index 0000000..43bfb9d --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_replication_hostgroups | set current test" + set_fact: + current_test: test_create_replication_hostgroups + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups reported a change" + assert: + that: + - "status is {{ 
test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml new file mode 100644 index 0000000..66d26c3 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_replication_hostgroups_in_memory_only | set current test" + set_fact: + current_test: test_create_replication_hostgroups_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml new file mode 100644 index 0000000..cd649a5 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_replication_hostgroups_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_replication_hostgroups_with_delayed_persist | set current test" + set_fact: + current_test: test_create_replication_hostgroups_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups reported a change" + 
assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml new file mode 100644 index 0000000..b9a2fb2 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create replication hostgroups in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create replication hostgroups in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml new file mode 100644 index 0000000..ad3a189 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_replication_hostgroups | set current test" + set_fact: + current_test: test_delete_replication_hostgroups + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | 
{{ current_test }} | confirm delete replication hostgroups did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml new file mode 100644 index 0000000..edd089a --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_replication_hostgroups_in_memory_only | set current test" + set_fact: + current_test: test_delete_replication_hostgroups_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml new file mode 100644 index 0000000..03a1988 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_replication_hostgroups_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_replication_hostgroups_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_replication_hostgroups_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups reported a change" + assert: + that: + - "status is {{ test_proxysql_replication_hostgroups_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a 
change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml + when: test_proxysql_replication_hostgroups_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml new file mode 100644 index 0000000..1fb5879 --- /dev/null +++ b/tests/integration/targets/test_proxysql_replication_hostgroups/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete replication hostgroups in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete replication hostgroups in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_writer_hostgroup }},{{ test_reader_hostgroup }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_replication_hostgroups.yml diff --git a/tests/integration/targets/test_proxysql_scheduler/defaults/main.yml b/tests/integration/targets/test_proxysql_scheduler/defaults/main.yml new file mode 100644 index 0000000..d7c0074 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/defaults/main.yml @@ -0,0 +1,9 @@ +--- +test_interval_ms: 1000 +test_filename: '/opt/maintenance.py' + +test_proxysql_scheduler_check_mode: false +test_proxysql_scheduler_in_memory_only: false +test_proxysql_scheduler_with_delayed_persist: false +test_proxysql_scheduler_check_idempotence: false +test_proxysql_scheduler_cleanup_after_test: true diff --git a/tests/integration/targets/test_proxysql_scheduler/meta/main.yml b/tests/integration/targets/test_proxysql_scheduler/meta/main.yml new file mode 100644 index 0000000..2023b8d --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: + - setup_proxysql diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml new file mode 100644 index 0000000..e1abf85 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/base_test.yml @@ -0,0 +1,58 @@ +--- +### prepare +- name: "{{ role_name }} | {{ current_test }} | are we performing a delete" + set_fact: + test_delete: "{{ current_test | regex_search('^test_delete') | ternary(true, false) }}" + +- 
name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start" + include_tasks: "{{ test_delete|ternary('setup_test_scheduler', 'cleanup_test_schedulers') }}.yml" + when: not test_proxysql_scheduler_check_idempotence + +### when + +- name: "{{ role_name }} | {{ current_test }} | {{ test_delete|ternary('delete','create') }} test scheduler" + proxysql_scheduler: + login_user: admin + login_password: admin + interval_ms: '{{ test_interval_ms }}' + filename: '{{ test_filename }}' + state: "{{ test_delete|ternary('absent', 'present') }}" + save_to_disk: "{{ not test_proxysql_scheduler_in_memory_only }}" + load_to_runtime: "{{ not test_proxysql_scheduler_in_memory_only }}" + check_mode: "{{ test_proxysql_scheduler_check_mode }}" + register: status + +- name: "{{ role_name }} | {{ current_test }} | persist the changes to disk, and load to runtime" + block: + + - name: "{{ role_name }} | {{ current_test }} | save the scheduler config from memory to disk" + proxysql_manage_config: + login_user: admin + login_password: admin + action: SAVE + config_settings: SCHEDULER + direction: TO + config_layer: DISK + + - name: "{{ role_name }} | {{ current_test }} | load the scheduler config from memory to runtime" + proxysql_manage_config: + login_user: admin + login_password: admin + action: LOAD + config_settings: SCHEDULER + direction: TO + config_layer: RUNTIME + + when: test_proxysql_scheduler_with_delayed_persist + +- name: "{{ role_name }} | {{ current_test }} | check if test scheduler exists in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT interval_ms || ',' || filename FROM scheduler where interval_ms = '{{ test_interval_ms }}' and filename = '{{ test_filename }}'" + register: memory_result + +- name: "{{ role_name }} | {{ current_test }} | check if test scheduler exists on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT interval_ms || ',' || filename FROM disk.scheduler where interval_ms = '{{ test_interval_ms }}' and filename = '{{ test_filename }}'" + register: disk_result + +- name: "{{ role_name }} | {{ current_test }} | check if test scheduler exists in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT interval_ms || ',' || filename FROM runtime_scheduler where interval_ms = '{{ test_interval_ms }}' and filename = '{{ test_filename }}'" + register: runtime_result diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml new file mode 100644 index 0000000..396ab83 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/cleanup_test_schedulers.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we start/finish" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure no schedulers are created" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"DELETE FROM scheduler" + + - name: "{{ role_name }} | {{ current_test }} | ensure no schedulers are saved on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE SCHEDULER TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure no schedulers are loaded to runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD SCHEDULER TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/main.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/main.yml new file mode 100644 index 
0000000..071a4d0 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/main.yml @@ -0,0 +1,83 @@ +--- +### tests + +- name: "{{ role_name }} | test_create_using_check_mode | test create scheduler using check mode" + import_tasks: test_create_using_check_mode.yml + vars: + test_proxysql_scheduler_check_mode: true + +- name: "{{ role_name }} | test_delete_using_check_mode | test delete scheduler using check mode" + import_tasks: test_delete_using_check_mode.yml + vars: + test_proxysql_scheduler_check_mode: true + +- name: "{{ role_name }} | test_create_scheduler | test create scheduler" + import_tasks: test_create_scheduler.yml + vars: + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_create_scheduler | test idempotence of create scheduler" + import_tasks: test_create_scheduler.yml + vars: + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_delete_scheduler | test delete scheduler" + import_tasks: test_delete_scheduler.yml + vars: + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_scheduler | test idempotence of delete scheduler" + import_tasks: test_delete_scheduler.yml + vars: + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_create_scheduler_in_memory_only | test create scheduler in memory" + import_tasks: test_create_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_create_scheduler_in_memory_only | test idempotence of create scheduler in memory" + import_tasks: test_create_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_delete_scheduler_in_memory_only | test delete scheduler in memory" + import_tasks: test_delete_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_scheduler_in_memory_only | test idempotence of delete scheduler in memory" + import_tasks: test_delete_scheduler_in_memory_only.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_create_scheduler_with_delayed_persist | test create scheduler with delayed save to disk/load to runtime" + import_tasks: test_create_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_create_scheduler_with_delayed_persist | test idempotence of create scheduler with delayed save to disk/load to runtime" + import_tasks: test_create_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_check_idempotence: true + +- name: "{{ role_name }} | test_delete_scheduler_with_delayed_persist | test delete scheduler with delayed save to disk/load to runtime" + import_tasks: test_delete_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_cleanup_after_test: false +- name: "{{ role_name }} | test_delete_scheduler_with_delayed_persist | test idempotence of 
delete scheduler with delayed save to disk/load to runtime" + import_tasks: test_delete_scheduler_with_delayed_persist.yml + vars: + test_proxysql_scheduler_in_memory_only: true + test_proxysql_scheduler_with_delayed_persist: true + test_proxysql_scheduler_check_idempotence: true + +### teardown + +- name: "{{ role_name }} | teardown | perform teardown" + import_tasks: teardown.yml diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml new file mode 100644 index 0000000..da12b25 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/setup_test_scheduler.yml @@ -0,0 +1,12 @@ +--- +- name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created when we start" + block: + + - name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created in memory" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"INSERT OR REPLACE INTO scheduler (interval_ms, filename) VALUES ('{{ test_interval_ms }}', '{{ test_filename}}')" + + - name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created on disk" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SAVE SCHEDULER TO DISK" + + - name: "{{ role_name }} | {{ current_test }} | ensure test scheduler is created in runtime" + shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"LOAD SCHEDULER TO RUNTIME" diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/teardown.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/teardown.yml new file mode 100644 index 0000000..0cb5ae1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/teardown.yml @@ -0,0 +1,6 @@ +--- +- name: "{{ role_name }} | teardown | uninstall proxysql" + apt: + name: proxysql + purge: true + state: absent diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml new file mode 100644 index 0000000..e290d26 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_scheduler | set current test" + set_fact: + current_test: test_create_scheduler + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml 
b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml new file mode 100644 index 0000000..d03cb77 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_scheduler_in_memory_only | set current test" + set_fact: + current_test: test_create_scheduler_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml new file mode 100644 index 0000000..ba7eec5 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_scheduler_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_create_scheduler_with_delayed_persist | set current test" + set_fact: + current_test: test_create_scheduler_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler did make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml new file mode 100644 index 0000000..bbb5de4 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_create_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_create_using_check_mode | set current test" + set_fact: + current_test: test_create_using_check_mode + +- include_tasks: 
base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if create scheduler in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler in check mode didn't make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler in check mode didn't make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm create scheduler in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml new file mode 100644 index 0000000..b136fec --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_scheduler | set current test" + set_fact: + current_test: test_delete_scheduler + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml new file mode 100644 index 0000000..50c58e1 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_in_memory_only.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_scheduler_in_memory_only | set current test" + set_fact: + current_test: test_delete_scheduler_in_memory_only + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}'
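At this point in the in-memory-only scenario the scheduler row has been removed from the main (memory) layer only: the module ran with save_to_disk and load_to_runtime disabled, so the disk and runtime layers are still expected to return the row, which is exactly what the two preceding asserts verify. The same probe the tests script against the admin interface can be run by hand; a sketch, assuming the admin/admin credentials and the 127.0.0.1:6032 endpoint used throughout these targets:

    - name: probe the disk layer for the pending-delete scheduler row
      shell: mysql -uadmin -padmin -h127.0.0.1 -P6032 -BNe"SELECT interval_ms || ',' || filename FROM disk.scheduler"
      register: disk_probe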
+ +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml new file mode 100644 index 0000000..bdea20c --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_scheduler_with_delayed_persist.yml @@ -0,0 +1,31 @@ +--- +- name: "{{ role_name }} | test_delete_scheduler_with_delayed_persist | set current test" + set_fact: + current_test: test_delete_scheduler_with_delayed_persist + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler reported a change" + assert: + that: + - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}" + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change in memory" + assert: + that: memory_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change on disk" + assert: + that: disk_result.stdout|length == 0 + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler did make a change to runtime" + assert: + that: runtime_result.stdout|length == 0 + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml + when: test_proxysql_scheduler_cleanup_after_test diff --git a/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml new file mode 100644 index 0000000..96e5133 --- /dev/null +++ b/tests/integration/targets/test_proxysql_scheduler/tasks/test_delete_using_check_mode.yml @@ -0,0 +1,30 @@ +--- +- name: "{{ role_name }} | test_delete_using_check_mode | set current test" + set_fact: + current_test: test_delete_using_check_mode + +- include_tasks: base_test.yml + +### then + +- name: "{{ role_name }} | {{ current_test }} | check if delete scheduler in check mode reported a change" + assert: + that: + - status is changed + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler in check mode didn't make a change in memory" + assert: + that: memory_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler in check mode didn't make a change on disk" + assert: + that: disk_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +- name: "{{ role_name }} | {{ current_test }} | confirm delete scheduler in check mode didn't make a change to runtime" + assert: + that: runtime_result.stdout == '{{ test_interval_ms }},{{ test_filename }}' + +### perform cleanup + +- name: "{{ role_name }} | {{ current_test }} | ensure we're in a clean state when we finish" + import_tasks: cleanup_test_schedulers.yml
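One detail keeps each scenario file above reusable for both its first run and its idempotence re-run: the test expression inside assert is itself templated. The Jinja2 ternary is rendered before the assertion is evaluated, so the very same task checks `status is changed` on the first pass and `status is not changed` when main.yml re-imports the file with the check-idempotence flag set. A distilled sketch of the pattern, with the flag name taken from the scheduler target:

    - name: check if the module reported a change
      assert:
        that:
          - "status is {{ test_proxysql_scheduler_check_idempotence|ternary('not changed', 'changed') }}"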
diff --git a/tests/sanity/ignore-2.10.txt b/tests/sanity/ignore-2.10.txt new file mode 100644 index 0000000..4b958cf --- /dev/null +++ b/tests/sanity/ignore-2.10.txt @@ -0,0 +1,2 @@ +roles/proxysql/molecule/default/tests/test_default.py future-import-boilerplate +roles/proxysql/molecule/default/tests/test_default.py metaclass-boilerplate diff --git a/tests/sanity/ignore-2.11.txt b/tests/sanity/ignore-2.11.txt new file mode 100644 index 0000000..4b958cf --- /dev/null +++ b/tests/sanity/ignore-2.11.txt @@ -0,0 +1,2 @@ +roles/proxysql/molecule/default/tests/test_default.py future-import-boilerplate +roles/proxysql/molecule/default/tests/test_default.py metaclass-boilerplate diff --git a/tests/sanity/ignore-2.9.txt b/tests/sanity/ignore-2.9.txt new file mode 100644 index 0000000..4b958cf --- /dev/null +++ b/tests/sanity/ignore-2.9.txt @@ -0,0 +1,2 @@ +roles/proxysql/molecule/default/tests/test_default.py future-import-boilerplate +roles/proxysql/molecule/default/tests/test_default.py metaclass-boilerplate
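The three identical ignore files waive the future-import-boilerplate and metaclass-boilerplate sanity checks for the molecule testinfra script across each supported ansible-test version, since that file is executed by pytest rather than loaded by Ansible itself. Beyond the test scaffolding, the scenarios above map directly onto everyday use of the collection. A minimal sketch of a play (the host group name is hypothetical; credentials and values are the ones these targets assume) that stages a scheduler entry in memory and then persists and activates it in separate steps, mirroring the delayed-persist scenarios:

    - hosts: proxysql_servers  # hypothetical inventory group
      tasks:
        - name: stage a scheduler entry in the in-memory config only
          proxysql_scheduler:
            login_user: admin
            login_password: admin
            interval_ms: 1000
            filename: /opt/maintenance.py
            save_to_disk: false
            load_to_runtime: false

        - name: persist the staged scheduler config to disk
          proxysql_manage_config:
            login_user: admin
            login_password: admin
            action: SAVE
            config_settings: SCHEDULER
            direction: TO
            config_layer: DISK

        - name: activate the staged scheduler config at runtime
          proxysql_manage_config:
            login_user: admin
            login_password: admin
            action: LOAD
            config_settings: SCHEDULER
            direction: TO
            config_layer: RUNTIME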