From 65f482d4f4310a4a1569ce6211c77689e0645422 Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Thu, 7 Feb 2019 11:47:15 +0100 Subject: [PATCH 01/36] Add RestoreHelper --- Dockerfile | 29 ++- README.md | 1 + examples/mongo-3-replicas-from-restore.yaml | 23 +++ mongoOperator/helpers/RestoreHelper.py | 166 ++++++++++++++++++ ...MongoClusterConfigurationSpecBackupsGCS.py | 4 + mongoOperator/services/MongoService.py | 7 + restore-from-backup-local.sh | 14 ++ tests/helpers/TestRestoreHelper.py | 103 +++++++++++ tests/test_utils.py | 3 + 9 files changed, 334 insertions(+), 16 deletions(-) create mode 100644 examples/mongo-3-replicas-from-restore.yaml create mode 100644 mongoOperator/helpers/RestoreHelper.py create mode 100755 restore-from-backup-local.sh create mode 100644 tests/helpers/TestRestoreHelper.py diff --git a/Dockerfile b/Dockerfile index 54e494c..1f1c6c6 100644 --- a/Dockerfile +++ b/Dockerfile @@ -2,31 +2,28 @@ FROM python:3.6-stretch AS base WORKDIR /usr/src/app -# install MongoDB tools -RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 -RUN echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/3.6 main" | tee /etc/apt/sources.list.d/mongodb-org-3.6.list -RUN apt-get update -RUN DEBIAN_FRONTEND=noninteractive apt-get install -y \ - mongodb-org-tools \ - mongodb-org-shell - COPY requirements.txt ./ -RUN pip install --no-cache-dir -r requirements.txt +# install MongoDB tools +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 && \ + echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/3.6 main" | tee /etc/apt/sources.list.d/mongodb-org-3.6.list && \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y mongodb-org-tools mongodb-org-shell && \ + pip install --no-cache-dir -r requirements.txt # This is the container build that will run the "unit tests" FROM base AS tests WORKDIR /usr/src/app COPY requirements-testing.txt ./ -RUN pip install -r requirements-testing.txt +ADD . . ARG cache=1 ARG KUBERNETES_SERVICE_HOST="localhost" ARG KUBERNETES_SERVICE_PORT=8081 -RUN mkdir -p /var/run/secrets/kubernetes.io/serviceaccount -RUN echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/token -RUN echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/ca.crt -ADD . . -RUN ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest -RUN coverage report --skip-covered --show-missing --fail-under=100 +RUN pip install -r requirements-testing.txt && \ + mkdir -p /var/run/secrets/kubernetes.io/serviceaccount && \ + echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/token && \ + echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/ca.crt && \ + ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest && \ + coverage report --skip-covered --show-missing --fail-under=100 # This is the container build statements that will create the container meant for deployment FROM base AS build diff --git a/README.md b/README.md index 2dc55e9..69f50aa 100644 --- a/README.md +++ b/README.md @@ -75,6 +75,7 @@ The following options are available to use in the `spec` section of the `yaml` c | `mongodb.replicas` | - | The amount of MongoDB replicas that should be available in the replica set. Must be an uneven positive integer and minimum 3. | | * `backups.cron` | - | The cron on which to create a backup to cloud storage. 
| * `backups.gcs.bucket` | - | The GCS bucket to upload the backup to. | +| `backups.gcs.restore_bucket` | - | The GCS bucket that contains the backup we wish to restore. | | `backups.gcs.prefix` | backups/ | The file name prefix for the backup file. | > Please read https://docs.mongodb.com/manual/administration/production-notes/#allocate-sufficient-ram-and-cpu for details about why setting the WiredTiger cache size is important when you change the container memory limit from the default value. diff --git a/examples/mongo-3-replicas-from-restore.yaml b/examples/mongo-3-replicas-from-restore.yaml new file mode 100644 index 0000000..4811723 --- /dev/null +++ b/examples/mongo-3-replicas-from-restore.yaml @@ -0,0 +1,23 @@ +apiVersion: "operators.ultimaker.com/v1" +kind: Mongo +metadata: + name: mongo-cluster + namespace: default +spec: + mongodb: + replicas: 3 # Must be between 3 and 50 + cpu_limit: "200m" + memory_limit: "64Mi" + backups: + cron: "0 * * * *" # every hour at 0 minutes + gcs: + bucket: "ultimaker-mongo-backups" + # Set restore_from to 'latest' to use the last backup created when initializing the replicaset. + restore_from: "latest" + # set restore_bucket if the file in restore_from is in another bucket. + # restore_bucket: + prefix: "test-backups" + serviceAccount: + secretKeyRef: + name: storage-serviceaccount + key: json diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py new file mode 100644 index 0000000..b0c13a8 --- /dev/null +++ b/mongoOperator/helpers/RestoreHelper.py @@ -0,0 +1,166 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +import json +import logging +import os +from base64 import b64decode +from subprocess import check_output, CalledProcessError, SubprocessError + +from datetime import datetime +from google.cloud.storage import Client as StorageClient +from google.oauth2.service_account import Credentials as ServiceCredentials +from typing import Dict, Tuple + +from mongoOperator.helpers.MongoResources import MongoResources +from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from mongoOperator.services.KubernetesService import KubernetesService + + +class RestoreHelper: + """ + Class responsible for handling the Restores for the Mongo cluster. + """ + DEFAULT_BACKUP_PREFIX = "backups" + BACKUP_FILE_FORMAT = "mongodb-backup-{namespace}-{name}-{date}.archive.gz" + + def __init__(self, kubernetes_service: KubernetesService): + """ + :param kubernetes_service: The kubernetes service. + """ + self.kubernetes_service = kubernetes_service + + def _getCredentials(self, cluster_object: V1MongoClusterConfiguration) -> dict: + """ + Retrieves the storage credentials for the given cluster object from the Kubernetes secret as specified in the + cluster object. + :param cluster_object: The cluster object from the YAML file. + :return: The credentials dictionary. + """ + secret_key = cluster_object.spec.backups.gcs.service_account.secret_key_ref + secret = self.kubernetes_service.getSecret(secret_key.name, cluster_object.metadata.namespace) + credentials_encoded = secret.data[secret_key.key] + credentials_json = b64decode(credentials_encoded) + return json.loads(credentials_json) + + def getLastBackup(self, cluster_object: V1MongoClusterConfiguration) -> str: + """ + Returns the filename of the last backup file in the bucket. + :param cluster_object: The cluster object from the YAML file. + :return: String containing the filename of the last backup. 
+ """ + prefix = cluster_object.spec.backups.gcs.prefix or self.DEFAULT_BACKUP_PREFIX + return self._lastBackupFile( + credentials=self._getCredentials(cluster_object), + bucket_name=cluster_object.spec.backups.gcs.restore_bucket \ + if cluster_object.spec.backups.gcs.restore_bucket \ + else cluster_object.spec.backups.gcs.bucket, + key="{}/".format(prefix)) + + @staticmethod + def _lastBackupFile(credentials: dict, bucket_name: str, key: str) -> str: + """ + Gets the name of the last backup file in the bucket. + :param credentials: The Google cloud storage service credentials retrieved from the Kubernetes secret. + :param bucket_name: The name of the bucket. + :param key: The prefix of tha backups + :return: The location of the last backup file. + """ + credentials = ServiceCredentials.from_service_account_info(credentials) + gcs_client = StorageClient(credentials.project_id, credentials) + bucket = gcs_client.get_bucket(bucket_name) + blobs = bucket.list_blobs(prefix=key) + + last_blob = None + for blob in blobs: + logging.info("Found backup file '%s' in bucket '%s'", blob.name, bucket_name) + if last_blob is None or blob.time_created > last_blob.time_created: + last_blob = blob + + return last_blob.name if last_blob else None + + def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: + """ + Checks whether a restore is requested for the cluster, looking up the restore file if + necessary. + :param cluster_object: The cluster object from the YAML file. + :return: Whether a restore was executed or not. + """ + cluster_key = (cluster_object.metadata.name, cluster_object.metadata.namespace) + if hasattr(cluster_object.spec.backups.gcs, 'restore_from'): + backup_file = cluster_object.spec.backups.gcs.restore_from + if backup_file == 'latest': + backup_file = self.getLastBackup(cluster_object) + + logging.info("Attempting to restore file %s to Cluster %s @ ns/%s.", backup_file, + cluster_object.metadata.name, cluster_object.metadata.namespace) + + self.restore(cluster_object, backup_file) + return True + + return False + + def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str): + """ + Attempts to restore the latest backup in the specified location to the given cluster. + Creates a new backup for the given cluster saving it in the cloud storage. + :param cluster_object: The cluster object from the YAML file. + :param backup_file: The filename of the backup we want to restore. + """ + pod_index = cluster_object.spec.mongodb.replicas - 1 # take last pod + hostname = MongoResources.getMemberHostname(pod_index, cluster_object.metadata.name, + cluster_object.metadata.namespace) + + logging.info("Restoring backup file %s to cluster %s @ ns/%s on %s.", backup_file, + cluster_object.metadata.name, cluster_object.metadata.namespace, hostname) + + # Download the backup file from the bucket + downloaded_file = self._downloadBackup(cluster_object, backup_file) + + try: + restore_output = check_output(["mongorestore", "--host", hostname, "--gzip", "--archive", + downloaded_file]) + except CalledProcessError as err: + raise SubprocessError("Could not restore '{}' to '{}'. Return code: {}\n stderr: '{}'\n stdout: '{}'" + .format(backup_file, hostname, err.returncode, err.stderr, err.stdout)) + + logging.debug("Restore output: %s", restore_output) + + os.remove(downloaded_file) + + def _downloadBackup(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) -> str: + """ + Downloads the backup file from cloud storage. 
+ :param cluster_object: The cluster object from the YAML file. + :param backup_file: The file name of the backup to download. + :return: The location of the downloaded file. + """ + prefix = cluster_object.spec.backups.gcs.prefix or self.DEFAULT_BACKUP_PREFIX + return self._downloadFile( + credentials=self._getCredentials(cluster_object), + bucket_name=cluster_object.spec.backups.gcs.restore_bucket \ + if cluster_object.spec.backups.gcs.restore_bucket \ + else cluster_object.spec.backups.gcs.bucket, + key="{}/{}".format(prefix, backup_file), + file_name="/tmp/" + backup_file + ) + + @staticmethod + def _downloadFile(credentials: dict, bucket_name: str, key: str, file_name: str) -> str: + """ + Downloads a file from cloud storage. + :param credentials: The Google cloud storage service credentials retrieved from the Kubernetes secret. + :param bucket_name: The name of the bucket. + :param key: The key to download the file from the cloud storage. + :param file_name: The file that will be downloaded. + :return: The location of the downloaded file. + """ + credentials = ServiceCredentials.from_service_account_info(credentials) + gcs_client = StorageClient(credentials.project_id, credentials) + bucket = gcs_client.get_bucket(bucket_name) + bucket.blob(key).download_to_filename(file_name) + print(repr(credentials)) + print(repr(bucket_name)) + + logging.info("Backup gcs://%s/%s downloaded to %s", bucket_name, key, file_name) + return file_name diff --git a/mongoOperator/models/V1MongoClusterConfigurationSpecBackupsGCS.py b/mongoOperator/models/V1MongoClusterConfigurationSpecBackupsGCS.py index 9d01438..8cefc67 100644 --- a/mongoOperator/models/V1MongoClusterConfigurationSpecBackupsGCS.py +++ b/mongoOperator/models/V1MongoClusterConfigurationSpecBackupsGCS.py @@ -14,3 +14,7 @@ class V1MongoClusterConfigurationSpecBackupsGCS(BaseModel): bucket = StringField(required=True) prefix = StringField(required=False) service_account = EmbeddedField(V1ServiceAccountRef, required=True) + + # When initializing a new ReplicaSet, load the data from this filename and bucket. 
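+    # Use "latest" as restore_from to pick the most recent backup; restore_bucket falls back to "bucket" when unset.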
+    restore_from = StringField(required=False)
+    restore_bucket = StringField(required=False)
diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py
index f51fb86..31f72d3 100644
--- a/mongoOperator/services/MongoService.py
+++ b/mongoOperator/services/MongoService.py
@@ -10,6 +10,8 @@
 from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker
 from mongoOperator.helpers.MongoResources import MongoResources
+from mongoOperator.helpers.RestoreHelper import RestoreHelper
+
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
 from mongoOperator.services.KubernetesService import KubernetesService
@@ -27,6 +29,8 @@ class MongoService:

     def __init__(self, kubernetes_service: KubernetesService):
         self.kubernetes_service = kubernetes_service
+        self.restore_helper = RestoreHelper(self.kubernetes_service)
+
     def _execInPod(self, pod_index: int, name: str, namespace: str, mongo_command: str) -> Dict[str, any]:
         """
@@ -84,6 +88,9 @@ def initializeReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> N
         if create_replica_response["ok"] == 1:
             logging.info("Initialized replica set %s @ ns/%s", cluster_name, namespace)
+
+            # If restore was specified, load restore file
+            self.restore_helper.restoreIfNeeded(cluster_object)
         else:
             raise ValueError("Unexpected response initializing replica set {} @ ns/{}:\n{}"
                              .format(cluster_name, namespace, create_replica_response))
diff --git a/restore-from-backup-local.sh b/restore-from-backup-local.sh
new file mode 100755
index 0000000..e139326
--- /dev/null
+++ b/restore-from-backup-local.sh
@@ -0,0 +1,14 @@
+#!/usr/bin/env bash
+
+POD_NAME=$(kubectl get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ")
+if [ -z "$POD_NAME" ]; then
+  echo "The operator pod is not running!"
+ kubectl get pods + exit 1 +fi + +# apply the example file +kubectl apply --filename=examples/mongo-3-replicas-from-restore.yaml + +# show the pod logs +kubectl logs ${POD_NAME} --follow diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py new file mode 100644 index 0000000..f304a29 --- /dev/null +++ b/tests/helpers/TestRestoreHelper.py @@ -0,0 +1,103 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +import json +from base64 import b64encode + +from kubernetes.client import V1Secret +from subprocess import CalledProcessError, SubprocessError + +from datetime import datetime +from unittest import TestCase +from unittest.mock import MagicMock, patch, call + +from mongoOperator.helpers.RestoreHelper import RestoreHelper +from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from tests.test_utils import getExampleClusterDefinitionWithRestore + + +class TestRestoreHelper(TestCase): + def setUp(self): + self.cluster_dict = getExampleClusterDefinitionWithRestore() + self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) + self.kubernetes_service = MagicMock() + self.restore_helper = RestoreHelper(self.kubernetes_service) + + self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode()) + self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials}) + + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restore") + def test_restoreIfNeeded(self, restore_mock, gcs_service_mock, storage_mock): + class MockBlob: + name = 'somebackupfile.gz' + #storage_mock.get_bucket.return_value = "foo" + storage_mock.get_bucket.return_value.list_blobs.return_value = [MockBlob()] + + self.restore_helper.restoreIfNeeded(self.cluster_object) + + restore_mock.assert_called_once_with(self.cluster_object, 'somebackupfile.gz') + + storage_mock.bucket.assert_called_once_with('ultimaker-mongo-backups') + + @patch("mongoOperator.helpers.RestoreHelper.os") + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + current_date = datetime(2018, 2, 28, 14, 0, 0) + expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + + self.restore_helper.restore(self.cluster_object, expected_backup_name) + + self.assertEqual([call.getSecret('storage-serviceaccount', 'default')], self.kubernetes_service.mock_calls) + + subprocess_mock.assert_called_once_with([ + 'mongorestore', '--host', 'mongo-cluster-2.mongo-cluster.default.svc.cluster.local', '--gzip', + '--archive', '/tmp/' + expected_backup_name + ]) + + expected_service_call = call.from_service_account_info({'user': 'password'}) + self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) + + expected_storage_calls = [ + call(gcs_service_mock.from_service_account_info.return_value.project_id, + gcs_service_mock.from_service_account_info.return_value), + call().bucket('ultimaker-mongo-backups'), + call().bucket().blob('test-backups/' + expected_backup_name), + call().bucket().blob().download_to_filename('/tmp/' + expected_backup_name), + ] + self.assertEqual(expected_storage_calls, storage_mock.mock_calls) + + 
expected_os_call = call.remove('/tmp/' + expected_backup_name) + self.assertEqual([expected_os_call], os_mock.mock_calls) + + @patch("mongoOperator.helpers.RestoreHelper.os") + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") + expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + + current_date = datetime(2018, 2, 28, 14, 0, 0) + + with self.assertRaises(SubprocessError) as context: + self.restore_helper.restore(self.cluster_object, expected_backup_name) + + self.assertEqual("Could not restore " + "'" + expected_backup_name + "'" + " to " + "'mongo-cluster-2.mongo-cluster.default.svc.cluster.local'. " + "Return code: 3\n stderr: 'error'\n stdout: 'output'", + str(context.exception)) + + self.assertEqual(1, subprocess_mock.call_count) + + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore_gcs_bad_credentials(self, subprocess_mock): + expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + with self.assertRaises(ValueError) as context: + self.restore_helper.restore(self.cluster_object, expected_backup_name) + self.assertIn("Service account info was not in the expected format", str(context.exception)) diff --git a/tests/test_utils.py b/tests/test_utils.py index 52a4bf7..033ba84 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -8,6 +8,9 @@ def getExampleClusterDefinition(replicas = 3) -> dict: with open("./examples/mongo-{}-replicas.yaml".format(replicas)) as f: return yaml.load(f) +def getExampleClusterDefinitionWithRestore() -> dict: + with open("./examples/mongo-3-replicas-from-restore.yaml") as f: + return yaml.load(f) def dict_eq(one, other): # [(k, getattr(self, k), getattr(other, k)) for k in self.__dict__ if getattr(self, k) != getattr(other, k)] From 55946f93951b8a43a53081f63b659ea9b9f51922 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Thu, 7 Feb 2019 12:28:40 +0100 Subject: [PATCH 02/36] Fix restoreIfNeeded test --- Dockerfile | 6 +- mongoOperator/helpers/RestoreHelper.py | 12 ++- tests/helpers/TestRestoreHelper.py | 133 +++++++++++++------------ 3 files changed, 82 insertions(+), 69 deletions(-) diff --git a/Dockerfile b/Dockerfile index 1f1c6c6..e4ca893 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,15 +14,15 @@ RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF505 FROM base AS tests WORKDIR /usr/src/app COPY requirements-testing.txt ./ -ADD . . ARG cache=1 ARG KUBERNETES_SERVICE_HOST="localhost" ARG KUBERNETES_SERVICE_PORT=8081 RUN pip install -r requirements-testing.txt && \ mkdir -p /var/run/secrets/kubernetes.io/serviceaccount && \ echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/token && \ - echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/ca.crt && \ - ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest && \ + echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/ca.crt +ADD . . 
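+# Copy the sources only after the dependencies are installed, so code changes do not invalidate the cached layers.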
+RUN ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest && \ coverage report --skip-covered --show-missing --fail-under=100 # This is the container build statements that will create the container meant for deployment diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index b0c13a8..a1ef89e 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -50,12 +50,13 @@ def getLastBackup(self, cluster_object: V1MongoClusterConfiguration) -> str: :return: String containing the filename of the last backup. """ prefix = cluster_object.spec.backups.gcs.prefix or self.DEFAULT_BACKUP_PREFIX + bucket_name = cluster_object.spec.backups.gcs.restore_bucket if cluster_object.spec.backups.gcs.restore_bucket \ + else cluster_object.spec.backups.gcs.bucket return self._lastBackupFile( credentials=self._getCredentials(cluster_object), - bucket_name=cluster_object.spec.backups.gcs.restore_bucket \ - if cluster_object.spec.backups.gcs.restore_bucket \ - else cluster_object.spec.backups.gcs.bucket, - key="{}/".format(prefix)) + bucket_name=bucket_name, + key="{}/".format(prefix) + ) @staticmethod def _lastBackupFile(credentials: dict, bucket_name: str, key: str) -> str: @@ -87,8 +88,9 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: :return: Whether a restore was executed or not. """ cluster_key = (cluster_object.metadata.name, cluster_object.metadata.namespace) - if hasattr(cluster_object.spec.backups.gcs, 'restore_from'): + if hasattr(cluster_object.spec.backups.gcs, "restore_from"): backup_file = cluster_object.spec.backups.gcs.restore_from + print("backup_file", backup_file) if backup_file == 'latest': backup_file = self.getLastBackup(cluster_object) diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py index f304a29..fc9b721 100644 --- a/tests/helpers/TestRestoreHelper.py +++ b/tests/helpers/TestRestoreHelper.py @@ -16,7 +16,15 @@ from tests.test_utils import getExampleClusterDefinitionWithRestore +class MockBlob: + """ + Mock implementation of storage Blob. 
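+    Only the "name" attribute is needed by the restore tests.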
+ """ + name = "somebackupfile.gz" + + class TestRestoreHelper(TestCase): + def setUp(self): self.cluster_dict = getExampleClusterDefinitionWithRestore() self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) @@ -30,74 +38,77 @@ def setUp(self): @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restore") def test_restoreIfNeeded(self, restore_mock, gcs_service_mock, storage_mock): - class MockBlob: - name = 'somebackupfile.gz' - #storage_mock.get_bucket.return_value = "foo" - storage_mock.get_bucket.return_value.list_blobs.return_value = [MockBlob()] + get_bucket_mock = storage_mock.return_value.get_bucket + get_bucket_mock.return_value.list_blobs.return_value = iter([MockBlob()]) self.restore_helper.restoreIfNeeded(self.cluster_object) - restore_mock.assert_called_once_with(self.cluster_object, 'somebackupfile.gz') - - storage_mock.bucket.assert_called_once_with('ultimaker-mongo-backups') - - @patch("mongoOperator.helpers.RestoreHelper.os") - @patch("mongoOperator.helpers.RestoreHelper.StorageClient") - @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") - @patch("mongoOperator.helpers.RestoreHelper.check_output") - def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): - current_date = datetime(2018, 2, 28, 14, 0, 0) - expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" - - self.restore_helper.restore(self.cluster_object, expected_backup_name) - self.assertEqual([call.getSecret('storage-serviceaccount', 'default')], self.kubernetes_service.mock_calls) - subprocess_mock.assert_called_once_with([ - 'mongorestore', '--host', 'mongo-cluster-2.mongo-cluster.default.svc.cluster.local', '--gzip', - '--archive', '/tmp/' + expected_backup_name - ]) - expected_service_call = call.from_service_account_info({'user': 'password'}) self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) - expected_storage_calls = [ - call(gcs_service_mock.from_service_account_info.return_value.project_id, - gcs_service_mock.from_service_account_info.return_value), - call().bucket('ultimaker-mongo-backups'), - call().bucket().blob('test-backups/' + expected_backup_name), - call().bucket().blob().download_to_filename('/tmp/' + expected_backup_name), - ] - self.assertEqual(expected_storage_calls, storage_mock.mock_calls) + get_bucket_mock.assert_called_once_with('ultimaker-mongo-backups') - expected_os_call = call.remove('/tmp/' + expected_backup_name) - self.assertEqual([expected_os_call], os_mock.mock_calls) + restore_mock.assert_called_once_with(self.cluster_object, 'somebackupfile.gz') - @patch("mongoOperator.helpers.RestoreHelper.os") - @patch("mongoOperator.helpers.RestoreHelper.StorageClient") - @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") - @patch("mongoOperator.helpers.RestoreHelper.check_output") - def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): - subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") - expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" - - current_date = datetime(2018, 2, 28, 14, 0, 0) - - with self.assertRaises(SubprocessError) as context: - self.restore_helper.restore(self.cluster_object, expected_backup_name) - - self.assertEqual("Could not restore " - "'" + expected_backup_name + "'" - " to " - "'mongo-cluster-2.mongo-cluster.default.svc.cluster.local'. 
" - "Return code: 3\n stderr: 'error'\n stdout: 'output'", - str(context.exception)) - - self.assertEqual(1, subprocess_mock.call_count) - - @patch("mongoOperator.helpers.RestoreHelper.check_output") - def test_restore_gcs_bad_credentials(self, subprocess_mock): - expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" - with self.assertRaises(ValueError) as context: - self.restore_helper.restore(self.cluster_object, expected_backup_name) - self.assertIn("Service account info was not in the expected format", str(context.exception)) + # @patch("mongoOperator.helpers.RestoreHelper.os") + # @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + # @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + # @patch("mongoOperator.helpers.RestoreHelper.check_output") + # def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + # current_date = datetime(2018, 2, 28, 14, 0, 0) + # expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + # + # self.restore_helper.restore(self.cluster_object, expected_backup_name) + # + # self.assertEqual([call.getSecret('storage-serviceaccount', 'default')], self.kubernetes_service.mock_calls) + # + # subprocess_mock.assert_called_once_with([ + # 'mongorestore', '--host', 'mongo-cluster-2.mongo-cluster.default.svc.cluster.local', '--gzip', + # '--archive', '/tmp/' + expected_backup_name + # ]) + # + # expected_service_call = call.from_service_account_info({'user': 'password'}) + # self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) + # + # expected_storage_calls = [ + # call(gcs_service_mock.from_service_account_info.return_value.project_id, + # gcs_service_mock.from_service_account_info.return_value), + # call().bucket('ultimaker-mongo-backups'), + # call().bucket().blob('test-backups/' + expected_backup_name), + # call().bucket().blob().download_to_filename('/tmp/' + expected_backup_name), + # ] + # self.assertEqual(expected_storage_calls, storage_mock.mock_calls) + # + # expected_os_call = call.remove('/tmp/' + expected_backup_name) + # self.assertEqual([expected_os_call], os_mock.mock_calls) + # + # @patch("mongoOperator.helpers.RestoreHelper.os") + # @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + # @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + # @patch("mongoOperator.helpers.RestoreHelper.check_output") + # def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + # subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") + # expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + # + # current_date = datetime(2018, 2, 28, 14, 0, 0) + # + # with self.assertRaises(SubprocessError) as context: + # self.restore_helper.restore(self.cluster_object, expected_backup_name) + # + # self.assertEqual("Could not restore " + # "'" + expected_backup_name + "'" + # " to " + # "'mongo-cluster-2.mongo-cluster.default.svc.cluster.local'. 
" + # "Return code: 3\n stderr: 'error'\n stdout: 'output'", + # str(context.exception)) + # + # self.assertEqual(1, subprocess_mock.call_count) + # + # @patch("mongoOperator.helpers.RestoreHelper.check_output") + # def test_restore_gcs_bad_credentials(self, subprocess_mock): + # expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + # with self.assertRaises(ValueError) as context: + # self.restore_helper.restore(self.cluster_object, expected_backup_name) + # self.assertIn("Service account info was not in the expected format", str(context.exception)) From e4283d0365491f0f816dd01678a4273432be823a Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Wed, 13 Feb 2019 16:43:53 +0100 Subject: [PATCH 03/36] Work in progress --- Dockerfile | 2 +- Dockerfile.local | 23 + ...yaml => mongo-3-replicas-from-backup.yaml} | 2 +- examples/mongo-3-replicas.yaml | 2 +- examples/mongo-5-replicas.yaml | 2 +- .../mongo-operator/cluster-role-binding.yaml | 4 +- .../mongo-operator/cluster-role.yaml | 2 +- .../operators/mongo-operator/deployment.yaml | 1 + .../mongo-operator/service-account.yaml | 1 + mongoOperator/helpers/MongoMonitoring.py | 119 +++++ mongoOperator/helpers/MongoResources.py | 90 +--- mongoOperator/helpers/RestoreHelper.py | 52 +- mongoOperator/services/KubernetesService.py | 12 - mongoOperator/services/MongoService.py | 167 +++--- requirements.txt | 4 +- restore-from-backup-local.sh | 42 +- .../mongo_responses/createUser-exists.txt | 7 - .../mongo_responses/createUser-notMaster.txt | 7 - .../mongo_responses/createUser-ok.json | 1 + .../mongo_responses/createUser-ok.txt | 13 - .../mongo_responses/initiate-not-found.txt | 9 - .../fixtures/mongo_responses/initiate-ok.json | 1 + .../fixtures/mongo_responses/initiate-ok.txt | 14 - .../mongo_responses/replica-status-error.txt | 7 - .../replica-status-not-initialized.txt | 10 - .../mongo_responses/replica-status-ok.json | 179 +++++++ .../mongo_responses/replica-status-ok.txt | 102 ---- tests/helpers/TestAdminSecretChecker.py | 4 +- tests/helpers/TestBackupChecker.py | 16 +- tests/helpers/TestBaseResourceChecker.py | 11 +- tests/helpers/TestClusterChecker.py | 68 ++- tests/helpers/TestMongoMonitoring.py | 50 ++ tests/helpers/TestMongoResources.py | 126 ----- tests/helpers/TestRestoreHelper.py | 152 +++--- .../models/TestV1MongoClusterConfiguration.py | 2 +- tests/services/TestKubernetesService.py | 15 +- tests/services/TestMongoService.py | 479 ++++++++++-------- tests/test_utils.py | 2 +- 38 files changed, 965 insertions(+), 835 deletions(-) create mode 100644 Dockerfile.local rename examples/{mongo-3-replicas-from-restore.yaml => mongo-3-replicas-from-backup.yaml} (94%) create mode 100644 mongoOperator/helpers/MongoMonitoring.py delete mode 100644 tests/fixtures/mongo_responses/createUser-exists.txt delete mode 100644 tests/fixtures/mongo_responses/createUser-notMaster.txt create mode 100644 tests/fixtures/mongo_responses/createUser-ok.json delete mode 100644 tests/fixtures/mongo_responses/createUser-ok.txt delete mode 100644 tests/fixtures/mongo_responses/initiate-not-found.txt create mode 100644 tests/fixtures/mongo_responses/initiate-ok.json delete mode 100644 tests/fixtures/mongo_responses/initiate-ok.txt delete mode 100644 tests/fixtures/mongo_responses/replica-status-error.txt delete mode 100644 tests/fixtures/mongo_responses/replica-status-not-initialized.txt create mode 100644 tests/fixtures/mongo_responses/replica-status-ok.json delete mode 100644 tests/fixtures/mongo_responses/replica-status-ok.txt 
create mode 100644 tests/helpers/TestMongoMonitoring.py delete mode 100644 tests/helpers/TestMongoResources.py diff --git a/Dockerfile b/Dockerfile index e4ca893..982895f 100644 --- a/Dockerfile +++ b/Dockerfile @@ -22,7 +22,7 @@ RUN pip install -r requirements-testing.txt && \ echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/token && \ echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/ca.crt ADD . . -RUN ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest && \ +RUN ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest -vvx && \ coverage report --skip-covered --show-missing --fail-under=100 # This is the container build statements that will create the container meant for deployment diff --git a/Dockerfile.local b/Dockerfile.local new file mode 100644 index 0000000..ef7f638 --- /dev/null +++ b/Dockerfile.local @@ -0,0 +1,23 @@ +# Copyright (c) 2018 Ultimaker B.V. +FROM python:3.6-stretch AS base +WORKDIR /usr/src/app + +COPY requirements*.txt ./ +# install MongoDB tools +RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 && \ + echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/3.6 main" | tee /etc/apt/sources.list.d/mongodb-org-3.6.list && \ + apt-get update && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y mongodb-org-tools mongodb-org-shell + +RUN pip install --no-cache-dir -r requirements.txt +RUN pip install --no-cache-dir -r requirements-testing.txt + +ARG cache=1 +ARG KUBERNETES_SERVICE_HOST="localhost" +ARG KUBERNETES_SERVICE_PORT=8081 +RUN mkdir -p /var/run/secrets/kubernetes.io/serviceaccount && \ + echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/token && \ + echo "unit-test" >> /var/run/secrets/kubernetes.io/serviceaccount/ca.crt +ADD . . 
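+# A typical local workflow (the image tag is only an example):
+#     docker build -f Dockerfile.local -t mongo-operator:local .
+#     docker run --rm mongo-operator:local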
+ +ENTRYPOINT ["python", "./main.py"] diff --git a/examples/mongo-3-replicas-from-restore.yaml b/examples/mongo-3-replicas-from-backup.yaml similarity index 94% rename from examples/mongo-3-replicas-from-restore.yaml rename to examples/mongo-3-replicas-from-backup.yaml index 4811723..a69b78d 100644 --- a/examples/mongo-3-replicas-from-restore.yaml +++ b/examples/mongo-3-replicas-from-backup.yaml @@ -2,7 +2,7 @@ apiVersion: "operators.ultimaker.com/v1" kind: Mongo metadata: name: mongo-cluster - namespace: default + namespace: mongo-operator-cluster spec: mongodb: replicas: 3 # Must be between 3 and 50 diff --git a/examples/mongo-3-replicas.yaml b/examples/mongo-3-replicas.yaml index 061db27..cd7115f 100644 --- a/examples/mongo-3-replicas.yaml +++ b/examples/mongo-3-replicas.yaml @@ -2,7 +2,7 @@ apiVersion: "operators.ultimaker.com/v1" kind: Mongo metadata: name: mongo-cluster - namespace: default + namespace: mongo-operator-cluster spec: mongodb: replicas: 3 # Must be between 3 and 50 diff --git a/examples/mongo-5-replicas.yaml b/examples/mongo-5-replicas.yaml index 697eb0e..eda315a 100644 --- a/examples/mongo-5-replicas.yaml +++ b/examples/mongo-5-replicas.yaml @@ -2,7 +2,7 @@ apiVersion: "operators.ultimaker.com/v1" kind: Mongo metadata: name: mongo-cluster - namespace: default + namespace: mongo-operator-cluster spec: mongodb: replicas: 5 # Must be between 3 and 50 diff --git a/kubernetes/operators/mongo-operator/cluster-role-binding.yaml b/kubernetes/operators/mongo-operator/cluster-role-binding.yaml index d0a5949..1a447e1 100644 --- a/kubernetes/operators/mongo-operator/cluster-role-binding.yaml +++ b/kubernetes/operators/mongo-operator/cluster-role-binding.yaml @@ -2,11 +2,11 @@ kind: ClusterRoleBinding apiVersion: rbac.authorization.k8s.io/v1beta1 metadata: name: mongo-operator-cluster-role-binding - namespace: default + namespace: mongo-operator-cluster subjects: - kind: ServiceAccount name: mongo-operator-service-account - namespace: default + namespace: mongo-operator-cluster roleRef: kind: ClusterRole name: mongo-operator-cluster-role diff --git a/kubernetes/operators/mongo-operator/cluster-role.yaml b/kubernetes/operators/mongo-operator/cluster-role.yaml index 74c1968..1feb3b8 100644 --- a/kubernetes/operators/mongo-operator/cluster-role.yaml +++ b/kubernetes/operators/mongo-operator/cluster-role.yaml @@ -2,7 +2,7 @@ apiVersion: rbac.authorization.k8s.io/v1beta1 kind: ClusterRole metadata: name: mongo-operator-cluster-role - namespace: default + namespace: mongo-operator-cluster rules: - apiGroups: [""] resources: ["services"] diff --git a/kubernetes/operators/mongo-operator/deployment.yaml b/kubernetes/operators/mongo-operator/deployment.yaml index 681db8c..bcf725e 100644 --- a/kubernetes/operators/mongo-operator/deployment.yaml +++ b/kubernetes/operators/mongo-operator/deployment.yaml @@ -4,6 +4,7 @@ metadata: labels: app: mongo-operator name: mongo-operator + namespace: mongo-operator-cluster spec: replicas: 1 revisionHistoryLimit: 2 diff --git a/kubernetes/operators/mongo-operator/service-account.yaml b/kubernetes/operators/mongo-operator/service-account.yaml index 57c9c3e..1f7253f 100644 --- a/kubernetes/operators/mongo-operator/service-account.yaml +++ b/kubernetes/operators/mongo-operator/service-account.yaml @@ -2,3 +2,4 @@ apiVersion: v1 kind: ServiceAccount metadata: name: mongo-operator-service-account + namespace: mongo-operator-cluster diff --git a/mongoOperator/helpers/MongoMonitoring.py b/mongoOperator/helpers/MongoMonitoring.py new file mode 100644 index 
0000000..a04e19b --- /dev/null +++ b/mongoOperator/helpers/MongoMonitoring.py @@ -0,0 +1,119 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +from typing import Callable + +from pymongo import monitoring +from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration + +import logging + + +class CommandLogger(monitoring.CommandListener): + + def started(self, event): + logging.debug("Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event)) + + def succeeded(self, event): + logging.debug("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event)) + + def failed(self, event): + logging.debug("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "failed in {0.duration_micros} " + "microseconds".format(event)) + + +class ServerLogger(monitoring.ServerListener): + + def opened(self, event): + logging.debug("Server {0.server_address} added to topology " + "{0.topology_id}".format(event)) + + def description_changed(self, event): + previous_server_type = event.previous_description.server_type + new_server_type = event.new_description.server_type + if new_server_type != previous_server_type: + # server_type_name was added in PyMongo 3.4 + logging.debug( + "Server {0.server_address} changed type from " + "{0.previous_description.server_type_name} to " + "{0.new_description.server_type_name}".format(event)) + + def closed(self, event): + logging.debug("Server {0.server_address} removed from topology " + "{0.topology_id}".format(event)) + + +class HeartbeatLogger(monitoring.ServerHeartbeatListener): + def __init__(self, cluster_object, all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: + self._cluster_object = cluster_object + self._expected_host_count = cluster_object.spec.mongodb.replicas + self._hosts = {} + self._all_hosts_ready_callback = all_hosts_ready_callback + self._callback_executed = False + + def started(self, event): + logging.debug("Heartbeat sent to server " + "{0.connection_id}".format(event)) + self._hosts[event.connection_id] = 0 + + def succeeded(self, event): + # The reply.document attribute was added in PyMongo 3.4. 
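        # Each successful heartbeat marks the host as ready (1); once every expected
        # host has reported in while the server still has no replica set config, the
        # all-hosts-ready callback fires exactly once to initialize the replica set.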
+ logging.debug("Heartbeat to server {0.connection_id} " + "succeeded with reply " + "{0.reply.document}".format(event)) + self._hosts[event.connection_id] = 1 + + if len(list(filter(lambda x: self._hosts[x] == 1, self._hosts))) == self._expected_host_count: + if not self._callback_executed and "info" in event.reply.document and event.reply.document["info"] == \ + "Does not have a valid replica set config": + self._all_hosts_ready_callback(self._cluster_object) + self._callback_executed = True + + def failed(self, event): + logging.warning("Heartbeat to server {0.connection_id} " + "failed with error {0.reply}".format(event)) + self._hosts[event.connection_id] = -1 + + +class TopologyLogger(monitoring.TopologyListener): + + def __init__(self, cluster_object, replica_set_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: + self._cluster_object = cluster_object + self._replica_set_ready_callback = replica_set_ready_callback + + def opened(self, event): + logging.debug("Topology with id {0.topology_id} " + "opened".format(event)) + + def description_changed(self, event): + logging.debug("Topology description updated for " + "topology id {0.topology_id}".format(event)) + previous_topology_type = event.previous_description.topology_type + new_topology_type = event.new_description.topology_type + if new_topology_type != previous_topology_type: + # topology_type_name was added in PyMongo 3.4 + logging.debug( + "Topology {0.topology_id} changed type from " + "{0.previous_description.topology_type_name} to " + "{0.new_description.topology_type_name}".format(event)) + # The has_writable_server and has_readable_server methods + # were added in PyMongo 3.4. + if not event.new_description.has_writable_server(): + logging.debug("No writable servers available.") + if not event.new_description.has_readable_server(): + logging.debug("No readable servers available.") + + if event.new_description.has_writable_server(): + self._replica_set_ready_callback(self._cluster_object) + + def closed(self, event): + logging.debug("Topology with id {0.topology_id} " + "closed".format(event)) + diff --git a/mongoOperator/helpers/MongoResources.py b/mongoOperator/helpers/MongoResources.py index 188aaac..8423216 100644 --- a/mongoOperator/helpers/MongoResources.py +++ b/mongoOperator/helpers/MongoResources.py @@ -7,7 +7,7 @@ import re from base64 import b64decode -from typing import List, Dict +from typing import List, Dict, Tuple, Any, Union from kubernetes import client @@ -31,43 +31,28 @@ def getMemberHostname(cls, pod_index, cluster_name, namespace) -> str: return "{}-{}.{}.{}.svc.cluster.local".format(cluster_name, pod_index, cluster_name, namespace) @classmethod - def createMongoExecCommand(cls, mongo_command: str) -> List[str]: - """ - Creates a command that can be executed in Kubernetes (`kubectl exec`) that will execute the given mongo command. - :param mongo_command: The command to be executed inside MongoDB. - :return: A list of arguments to be passed to Kubernetes. - """ - return [ - "mongo", "localhost:27017/admin", - #TODO: use SSL with MongoDB. - # "--ssl", - # "--sslCAFile", "/etc/ssl/mongod/ca.pem", - # "--sslPEMKeyFile", "/etc/ssl/mongod/mongod.pem", - "--eval", mongo_command - ] - - @classmethod - def createReplicaInitiateCommand(cls, cluster_object) -> str: + def createReplicaInitiateCommand(cls, cluster_object) -> Tuple[str, dict]: """ Creates a MongoDB command that initiates the replica set, i.e. a rs.initiate() command with the host names. 
:param cluster_object: The cluster object from the YAML file. :return: The command to be sent to MongoDB. """ replica_set_config = cls._createReplicaConfig(cluster_object) - return "rs.initiate({})".format(json.dumps(replica_set_config)) + return "replSetInitiate", replica_set_config @classmethod - def createReplicaReconfigureCommand(cls, cluster_object) -> str: + def createReplicaReconfigureCommand(cls, cluster_object) -> Tuple[str, dict]: """ Creates a MongoDB command that reconfigures the replica set, i.e. a rs.reconfig() command with the host names. :param cluster_object: The cluster object from the YAML file. :return: The command to be sent to MongoDB. """ replica_set_config = cls._createReplicaConfig(cluster_object) - return "rs.reconfig({})".format(json.dumps(replica_set_config)) + return "replSetReconfig", replica_set_config @classmethod - def createCreateAdminCommand(cls, admin_credentials: client.V1Secret) -> str: + def createCreateAdminCommand(cls, admin_credentials: client.V1Secret)\ + -> Tuple[str, Any, Dict[str, Union[List[Dict[str, str]], Any]]]: """ Creates a MongoDB command that creates administrator users. :param admin_credentials: The admin credentials secret model. @@ -75,14 +60,13 @@ def createCreateAdminCommand(cls, admin_credentials: client.V1Secret) -> str: """ admin_username = b64decode(admin_credentials.data["username"]).decode("utf-8") admin_password = b64decode(admin_credentials.data["password"]).decode("utf-8") - return ''' - admin = db.getSiblingDB("admin") - admin.createUser({{ - user: "{user}", pwd: "{password}", - roles: [ {{ role: "root", db: "admin" }} ] - }}) - admin.auth("{user}", "{password}") - '''.format(user=admin_username, password=admin_password) + kwargs = { + "pwd": admin_password, + "roles": [ + {"role": "root", "db": "admin"} + ] + } + return "createUser", admin_username, kwargs @classmethod def createStatusCommand(cls) -> str: @@ -90,39 +74,7 @@ def createStatusCommand(cls) -> str: Returns the string that is used to retrieve the status from the MongoDB replica set. :return: The command to be sent to MongoDB. """ - return "rs.status()" - - @classmethod - def parseMongoResponse(cls, exec_response: str) -> Dict[str, any]: - """ - Parses a response from the MongoDB daemon. See `tests/fixtures/mongo_responses` for some examples. - :param exec_response: The response from Mongo. - :return: The JSON object found in the response. - :raise ValueError: If no JSON object was found. - """ - json_search = re.search(r"^[^{}]+({[\s\S]*})[\s\d]+$", exec_response) - if json_search: - clean_json = json_search.group(1) - clean_json = re.sub("Timestamp\((\d+), (\d)\)", r"\1.\2", clean_json) - clean_json = re.sub("BinData\(0,(.+)\)", r"\1", clean_json) - clean_json = re.sub("NumberLong\((-?\d+)\)", r"\1", clean_json) - clean_json = re.sub("ISODate\((\S+)\)", r"\1", clean_json) - try: - return json.loads(clean_json) - except JSONDecodeError as err: - raise ValueError("Cannot parse JSON because of error {}:\n{}".format(err, repr(clean_json))) - - exception_search = re.search(r"exception: ([^\n]+)", exec_response) - if exception_search: - raise ValueError(exception_search.group(1).strip(": ")) - - error_search = re.search(r"Error: (.+)", exec_response) - if error_search: - raise ValueError(error_search.group(1).strip(": ")) - - # MongoDB often returns an empty status when it's starting up. 
- logging.info("Cannot find any JSON or error in the MongoDB response: %s", repr(exec_response)) - return {} + return "replSetGetStatus" @classmethod def _createReplicaConfig(cls, cluster_object: V1MongoClusterConfiguration) -> Dict[str, any]: @@ -139,3 +91,15 @@ def _createReplicaConfig(cls, cluster_object: V1MongoClusterConfiguration) -> Di "version": 1, "members": [{"_id": i, "host": cls.getMemberHostname(i, name, namespace)} for i in range(replicas)], } + + @classmethod + def getConnectionSeeds(cls, cluster_object: V1MongoClusterConfiguration) -> List[str]: + """ + Creates a list with the replica set members for mongo. + :param cluster_object: The cluster object from the YAML file. + :return: A list with the member hostnames. + """ + name = cluster_object.metadata.name + namespace = cluster_object.metadata.namespace + replicas = cluster_object.spec.mongodb.replicas + return [cls.getMemberHostname(i, name, namespace) for i in range(replicas)] diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index a1ef89e..ea39e27 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -7,14 +7,14 @@ from base64 import b64decode from subprocess import check_output, CalledProcessError, SubprocessError -from datetime import datetime +from time import sleep from google.cloud.storage import Client as StorageClient from google.oauth2.service_account import Credentials as ServiceCredentials -from typing import Dict, Tuple from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService +# from mongoOperator.services.MongoService import MongoService class RestoreHelper: @@ -23,6 +23,8 @@ class RestoreHelper: """ DEFAULT_BACKUP_PREFIX = "backups" BACKUP_FILE_FORMAT = "mongodb-backup-{namespace}-{name}-{date}.archive.gz" + RESTORE_RETRIES = 4 + RESTORE_WAIT = 15.0 def __init__(self, kubernetes_service: KubernetesService): """ @@ -78,7 +80,8 @@ def _lastBackupFile(credentials: dict, bucket_name: str, key: str) -> str: if last_blob is None or blob.time_created > last_blob.time_created: last_blob = blob - return last_blob.name if last_blob else None + logging.info("Returning backup file %s", last_blob.name.replace(key, "")) + return last_blob.name.replace(key, "") if last_blob else None def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: """ @@ -87,14 +90,12 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: :param cluster_object: The cluster object from the YAML file. :return: Whether a restore was executed or not. 
""" - cluster_key = (cluster_object.metadata.name, cluster_object.metadata.namespace) - if hasattr(cluster_object.spec.backups.gcs, "restore_from"): + if cluster_object.spec.backups.gcs.restore_from is not None: backup_file = cluster_object.spec.backups.gcs.restore_from - print("backup_file", backup_file) if backup_file == 'latest': backup_file = self.getLastBackup(cluster_object) - logging.info("Attempting to restore file %s to Cluster %s @ ns/%s.", backup_file, + logging.info("Attempting to restore file %s to cluster %s @ ns/%s.", backup_file, cluster_object.metadata.name, cluster_object.metadata.namespace) self.restore(cluster_object, backup_file) @@ -102,33 +103,38 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: return False - def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str): + def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) -> bool: """ Attempts to restore the latest backup in the specified location to the given cluster. Creates a new backup for the given cluster saving it in the cloud storage. :param cluster_object: The cluster object from the YAML file. :param backup_file: The filename of the backup we want to restore. """ - pod_index = cluster_object.spec.mongodb.replicas - 1 # take last pod - hostname = MongoResources.getMemberHostname(pod_index, cluster_object.metadata.name, - cluster_object.metadata.namespace) + hosts = MongoResources.getConnectionSeeds(cluster_object) - logging.info("Restoring backup file %s to cluster %s @ ns/%s on %s.", backup_file, - cluster_object.metadata.name, cluster_object.metadata.namespace, hostname) + logging.info("Restoring backup file %s to cluster %s @ ns/%s.", backup_file, + cluster_object.metadata.name, cluster_object.metadata.namespace) # Download the backup file from the bucket downloaded_file = self._downloadBackup(cluster_object, backup_file) - try: - restore_output = check_output(["mongorestore", "--host", hostname, "--gzip", "--archive", - downloaded_file]) - except CalledProcessError as err: - raise SubprocessError("Could not restore '{}' to '{}'. Return code: {}\n stderr: '{}'\n stdout: '{}'" - .format(backup_file, hostname, err.returncode, err.stderr, err.stdout)) + for _ in range(self.RESTORE_RETRIES): + # Wait for the replicaset to become ready - logging.debug("Restore output: %s", restore_output) + try: + logging.info("Running mongorestore --host %s --gzip --archive=%s", ','.join(hosts), downloaded_file) + restore_output = check_output(["mongorestore", "--host", ','.join(hosts), "--gzip", + "--archive=" + downloaded_file]) + logging.info("Restore output: %s", restore_output) + os.remove(downloaded_file) + return True - os.remove(downloaded_file) + except CalledProcessError as err: + logging.error("Could not restore '{}', attempt {}. 
Return code: {} stderr: '{}' stdout: '{}'" + .format(backup_file, _, err.returncode, err.stderr, err.stdout)) + sleep(self.RESTORE_WAIT) + + raise SubprocessError("Could not restore '{}' after {} retries!".format(backup_file, self.RESTORE_RETRIES)) def _downloadBackup(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) -> str: """ @@ -160,9 +166,9 @@ def _downloadFile(credentials: dict, bucket_name: str, key: str, file_name: str) credentials = ServiceCredentials.from_service_account_info(credentials) gcs_client = StorageClient(credentials.project_id, credentials) bucket = gcs_client.get_bucket(bucket_name) + logging.info("Going to download gcs://%s/%s", bucket_name, key) + bucket.blob(key).download_to_filename(file_name) - print(repr(credentials)) - print(repr(bucket_name)) logging.info("Backup gcs://%s/%s downloaded to %s", bucket_name, key, file_name) return file_name diff --git a/mongoOperator/services/KubernetesService.py b/mongoOperator/services/KubernetesService.py index e91c276..d1ffcd8 100644 --- a/mongoOperator/services/KubernetesService.py +++ b/mongoOperator/services/KubernetesService.py @@ -261,15 +261,3 @@ def deleteStatefulSet(self, name: str, namespace: str) -> bool: body = V1DeleteOptions() logging.info("Deleting stateful set %s @ ns/%s.", name, namespace) return self.apps_api.delete_namespaced_stateful_set(name, namespace, body) - - def execInPod(self, container, pod_name, namespace, exec_cmd) -> str: - """ - Executes a command in the pod with the given name. - :param container: The container name. - :param pod_name: The pod name. - :param namespace: The pod namespace. - :param exec_cmd: The command to execute. - :return: The command output. - """ - return stream(self.core_api.connect_get_namespaced_pod_exec, pod_name, namespace, command=exec_cmd, - container=container, stderr=True, stdin=False, stdout=True, tty=False) diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 31f72d3..d406fb3 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -6,15 +6,17 @@ from time import sleep -from kubernetes.client.rest import ApiException - from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.helpers.RestoreHelper import RestoreHelper +from mongoOperator.helpers.MongoMonitoring import CommandLogger, TopologyLogger, ServerLogger, HeartbeatLogger from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService +from pymongo import MongoClient +from pymongo.errors import ConnectionFailure, OperationFailure + class MongoService: """ @@ -24,51 +26,64 @@ class MongoService: # after creating a new object definition we can get handshake failures. # below we can configure how many times we retry and how long we wait in between. 
-    EXEC_IN_POD_RETRIES = 4
-    EXEC_IN_POD_WAIT = 15.0
+    MONGO_COMMAND_RETRIES = 4
+    MONGO_COMMAND_WAIT = 15.0

     def __init__(self, kubernetes_service: KubernetesService):
         self.kubernetes_service = kubernetes_service
         self.restore_helper = RestoreHelper(self.kubernetes_service)
+        self.mongo_connections = {}
+        self.restores_done = []
+
+    def _onReplicaSetReady(self, cluster_object: V1MongoClusterConfiguration) -> None:
+        if cluster_object.metadata.name not in self.restores_done:
+            # If a restore was specified, load the restore file now that the replica set is ready.
+            self.restore_helper.restoreIfNeeded(cluster_object)
+            self.restores_done.append(cluster_object.metadata.name)
+
+    def _onAllHostsReady(self, cluster_object: V1MongoClusterConfiguration) -> None:
+        self.initializeReplicaSet(cluster_object)

-    def _execInPod(self, pod_index: int, name: str, namespace: str, mongo_command: str) -> Dict[str, any]:
+    def _mongoAdminCommand(self, cluster_object: V1MongoClusterConfiguration, mongo_command: str, *args, **kwargs) -> Dict[str, any]:
         """
-        Executes the given mongo command inside the pod with the given name. Retries a few times in case we receive a
-        handshake failure.
-        :param pod_index: The index of the pod.
-        :param name: The name of the cluster.
-        :param namespace: The namespace of the cluster.
+        Executes the given mongo admin command against the cluster's replica set. Retries a few times in case we
+        receive a connection failure.
+        :param cluster_object: The cluster object from the YAML file.
         :param mongo_command: The command to be executed in mongo.
         :return: The response from MongoDB. See files in `tests/fixtures/mongo_responses` for examples.
         :raise ValueError: If the result could not be parsed.
-        :raise TimeoutError: If we could not connect to the pod after retrying.
+        :raise TimeoutError: If we could not connect after retrying.
         """
-        exec_command = MongoResources.createMongoExecCommand(mongo_command)
-        pod_name = "{}-{}".format(name, pod_index)
-
-        for _ in range(self.EXEC_IN_POD_RETRIES):
+        for _ in range(self.MONGO_COMMAND_RETRIES):
             try:
-                exec_response = self.kubernetes_service.execInPod(self.CONTAINER, pod_name, namespace, exec_command)
-                response = MongoResources.parseMongoResponse(exec_response)
-                if response and response.get("codeName") != "NodeNotFound":
-                    return response
-                logging.info("Waiting for replica set members for %s @ ns/%s: %s", pod_name, namespace, response)
-
-            except ValueError as e:
-                if str(e) not in ("connection attempt failed", "connect failed"):
-                    raise
-                logging.info("Could not connect to Mongo in pod %s @ ns/%s: %s", pod_name, namespace, e)
-
-            except ApiException as e:
-                if "Handshake status" not in e.reason:
-                    logging.error("Error sending following command to pod %s: %s", pod_name, repr(mongo_command))
-                    raise
-                logging.info("Could not check the replica set or initialize it because of %s. The service is probably "
-                             "starting up. 
We wait %s seconds before retrying.", e.reason, self.EXEC_IN_POD_WAIT) - sleep(self.EXEC_IN_POD_WAIT) - - raise TimeoutError("Could not check the replica set after {} retries!".format(self.EXEC_IN_POD_RETRIES)) + replicaset = cluster_object.metadata.name + + if replicaset not in self.mongo_connections: + self.mongo_connections[replicaset] = MongoClient( + MongoResources.getConnectionSeeds(cluster_object), + connectTimeoutMS=60000, + serverSelectionTimeoutMS=60000, + replicaSet=replicaset, + event_listeners=[CommandLogger(), + TopologyLogger(cluster_object, + replica_set_ready_callback=self._onReplicaSetReady), + ServerLogger(), + HeartbeatLogger(cluster_object, + all_hosts_ready_callback=self._onAllHostsReady) + ] + ) + + return self.mongo_connections[replicaset].admin.command(mongo_command, *args, **kwargs) + except ConnectionFailure as e: + logging.error("Exception while trying to connect to Mongo: %s", str(e)) + + logging.info("Command timed out, waiting %s seconds before trying again (attempt %s/%s)", + self.MONGO_COMMAND_WAIT, _, self.MONGO_COMMAND_RETRIES) + + sleep(self.MONGO_COMMAND_WAIT) + + raise TimeoutError("Could not execute command after {} retries!".format(self.MONGO_COMMAND_RETRIES)) def initializeReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None: """ @@ -80,17 +95,14 @@ def initializeReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> N cluster_name = cluster_object.metadata.name namespace = cluster_object.metadata.namespace - create_replica_command = MongoResources.createReplicaInitiateCommand(cluster_object) - - create_replica_response = self._execInPod(0, cluster_name, namespace, create_replica_command) + create_replica_command, create_replica_args = MongoResources.createReplicaInitiateCommand(cluster_object) + conn = MongoClient(MongoResources.getMemberHostname(0, cluster_name, namespace)) + create_replica_response = conn.admin.command(create_replica_command, create_replica_args) logging.debug("Initializing replica, received %s", repr(create_replica_response)) if create_replica_response["ok"] == 1: logging.info("Initialized replica set %s @ ns/%s", cluster_name, namespace) - - # If restore was specified, load restore file - self.restore_helper.restoreIfNeeded(cluster_object) else: raise ValueError("Unexpected response initializing replica set {} @ ns/{}:\n{}" .format(cluster_name, namespace, create_replica_response)) @@ -106,9 +118,8 @@ def reconfigureReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> namespace = cluster_object.metadata.namespace replicas = cluster_object.spec.mongodb.replicas - reconfigure_command = MongoResources.createReplicaReconfigureCommand(cluster_object) - - reconfigure_response = self._execInPod(0, cluster_name, namespace, reconfigure_command) + reconfigure_command, reconfigure_args = MongoResources.createReplicaReconfigureCommand(cluster_object) + reconfigure_response = self._mongoAdminCommand(cluster_object, reconfigure_command, reconfigure_args) logging.debug("Reconfiguring replica, received %s", repr(reconfigure_response)) @@ -131,20 +142,24 @@ def checkReplicaSetOrInitialize(self, cluster_object: V1MongoClusterConfiguratio create_status_command = MongoResources.createStatusCommand() - create_status_response = self._execInPod(0, cluster_name, namespace, create_status_command) - logging.debug("Checking replicas, received %s", repr(create_status_response)) - - # If the replica set is not initialized yet, we initialize it - if create_status_response["ok"] == 0 and 
create_status_response["codeName"] == "NotYetInitialized": - return self.initializeReplicaSet(cluster_object) - - elif create_status_response["ok"] == 1: - logging.info("The replica set %s @ ns/%s seems to be working properly with %s/%s pods.", - cluster_name, namespace, len(create_status_response["members"]), replicas) - if replicas != len(create_status_response["members"]): - self.reconfigureReplicaSet(cluster_object) - else: - raise ValueError("Unexpected response trying to check replicas: '{}'".format(repr(create_status_response))) + try: + create_status_response = self._mongoAdminCommand(cluster_object, create_status_command) + logging.debug("Checking replicas, received %s", repr(create_status_response)) + + if create_status_response["ok"] == 1: + logging.info("The replica set %s @ ns/%s seems to be working properly with %s/%s pods.", + cluster_name, namespace, len(create_status_response["members"]), replicas) + if replicas != len(create_status_response["members"]): + self.reconfigureReplicaSet(cluster_object) + else: + raise ValueError("Unexpected response trying to check replicas: '{}'".format( + repr(create_status_response))) + + except OperationFailure as e: + # If the replica set is not initialized yet, we initialize it + if str(e) == "no replset config has been received": + return self.initializeReplicaSet(cluster_object) + raise def createUsers(self, cluster_object: V1MongoClusterConfiguration) -> None: """ @@ -155,44 +170,12 @@ def createUsers(self, cluster_object: V1MongoClusterConfiguration) -> None: """ cluster_name = cluster_object.metadata.name namespace = cluster_object.metadata.namespace - replicas = cluster_object.spec.mongodb.replicas secret_name = AdminSecretChecker.getSecretName(cluster_name) admin_credentials = self.kubernetes_service.getSecret(secret_name, namespace) - create_admin_command = MongoResources.createCreateAdminCommand(admin_credentials) - - logging.info("Creating users for %s pods", replicas) - - for _ in range(self.EXEC_IN_POD_RETRIES): - for i in range(replicas): - # see tests for examples of these responses. - try: - exec_response = self._execInPod(i, cluster_name, namespace, create_admin_command) - if "user" in exec_response: - logging.info("Created users for pod %s-%s @ ns/%s", cluster_name, i, namespace) - return - - raise ValueError("Unexpected response creating users for pod {}-{} @ ns/{}:\n{}" - .format(cluster_name, i, namespace, exec_response)) - - except ValueError as err: - err_str = str(err) - - if "couldn't add user: not master" in err_str: - # most of the time member 0 is elected master, otherwise we get this error and need to loop through - # members until we find the master - logging.info("The user could not be created in pod %s-%s because it's not master.", cluster_name, i) - continue - - if "already exists" in err_str: - logging.info("User creation not necessary: %s", err_str) - return - - raise - - logging.info("Could not create users in any of the %s pods of cluster %s @ ns/%s. We wait %s seconds " - "before retrying.", replicas, cluster_name, namespace, self.EXEC_IN_POD_WAIT) - sleep(self.EXEC_IN_POD_WAIT) - - raise TimeoutError("Could not create users in any of the {} pods of cluster {} @ ns/{}." 
- .format(replicas, cluster_name, namespace)) + create_admin_command, create_admin_args, create_admin_kwargs = MongoResources.createCreateAdminCommand( + admin_credentials) + logging.info("Creating admin user.") + create_admin_response = self._mongoAdminCommand(cluster_object, create_admin_command, create_admin_args, + **create_admin_kwargs) + logging.info("Got response: %s", create_admin_response) diff --git a/requirements.txt b/requirements.txt index 2d58766..c34abf0 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,5 +1,5 @@ PyYAML kubernetes==8.0.0 -PyMongo +pymongo croniter -google-cloud-storage \ No newline at end of file +google-cloud-storage diff --git a/restore-from-backup-local.sh b/restore-from-backup-local.sh index e139326..2e04349 100755 --- a/restore-from-backup-local.sh +++ b/restore-from-backup-local.sh @@ -1,14 +1,48 @@ #!/usr/bin/env bash +set -e +set -o pipefail -POD_NAME=$(kubectl get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ") +# set the environment of the minikube docker +eval $(minikube docker-env) + +readonly NAMESPACE="mongo-operator-cluster" +readonly KUBECTL="kubectl --namespace=${NAMESPACE}" + +# build the docker image +docker build --tag ultimaker/k8s-mongo-operator:local . + +# print out the Kubernetes client and server versions +${KUBECTL} version || true + + +if ! kubectl get namespace ${NAMESPACE}; then + kubectl create namespace ${NAMESPACE} +fi + +# remove the deployment, if needed, and apply the new one +${KUBECTL} delete deployment mongo-operator 2>/dev/null || true +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/service-account.yaml || true +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml || true +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml || true +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/deployment.yaml || true + +# show some details about the deployment +${KUBECTL} describe deploy mongo-operator + +# create a secret with the google account credentials +${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json || true + +# wait for the pod to startup to retrieve its name +sleep 10 +POD_NAME=$(${KUBECTL} get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ") if [ -z $POD_NAME ]; then echo "The operator pod is not running!" 
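# Note: the fixed `sleep 10` above is racy; a slow image pull can leave the
# pod still Pending when we look for it. Assuming the deployment labels its
# pods, `${KUBECTL} wait --for=condition=Ready pod --selector=<label> --timeout=120s`
# would block until the pod is actually up instead of guessing at a delay.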
- kubectl get pods + ${KUBECTL} get pods exit 1 fi # apply the example file -kubectl apply --filename=examples/mongo-3-replicas-from-restore.yaml +${KUBECTL} apply --filename=examples/mongo-3-replicas-from-backup.yaml # show the pod logs -kubectl logs ${POD_NAME} --follow +${KUBECTL} logs ${POD_NAME} --follow diff --git a/tests/fixtures/mongo_responses/createUser-exists.txt b/tests/fixtures/mongo_responses/createUser-exists.txt deleted file mode 100644 index e247bf8..0000000 --- a/tests/fixtures/mongo_responses/createUser-exists.txt +++ /dev/null @@ -1,7 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -2018-06-07T09:26:40.768+0000 E QUERY [thread1] Error: couldn't add user: User "root@admin" already exists : -_getErrorWithCode@src/mongo/shell/utils.js:25:13 -DB.prototype.createUser@src/mongo/shell/db.js:1437:15 -@(shell eval):3:1 \ No newline at end of file diff --git a/tests/fixtures/mongo_responses/createUser-notMaster.txt b/tests/fixtures/mongo_responses/createUser-notMaster.txt deleted file mode 100644 index 1e367ab..0000000 --- a/tests/fixtures/mongo_responses/createUser-notMaster.txt +++ /dev/null @@ -1,7 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -2018-06-07T09:51:45.678+0000 E QUERY [thread1] Error: couldn't add user: not master : -_getErrorWithCode@src/mongo/shell/utils.js:25:13 -DB.prototype.createUser@src/mongo/shell/db.js:1437:15 -@(shell eval):3:1 diff --git a/tests/fixtures/mongo_responses/createUser-ok.json b/tests/fixtures/mongo_responses/createUser-ok.json new file mode 100644 index 0000000..0897e4e --- /dev/null +++ b/tests/fixtures/mongo_responses/createUser-ok.json @@ -0,0 +1 @@ +{"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549962075, "i": 4}}, "$clusterTime": {"clusterTime": {"$timestamp": {"t": 1549962075, "i": 4}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", "$type": "00"}, "keyId": 0}}} \ No newline at end of file diff --git a/tests/fixtures/mongo_responses/createUser-ok.txt b/tests/fixtures/mongo_responses/createUser-ok.txt deleted file mode 100644 index bf6ec58..0000000 --- a/tests/fixtures/mongo_responses/createUser-ok.txt +++ /dev/null @@ -1,13 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -Successfully added user: { - "user" : "root", - "roles" : [ - { - "role" : "root", - "db" : "admin" - } - ] -} -1 diff --git a/tests/fixtures/mongo_responses/initiate-not-found.txt b/tests/fixtures/mongo_responses/initiate-not-found.txt deleted file mode 100644 index 802741a..0000000 --- a/tests/fixtures/mongo_responses/initiate-not-found.txt +++ /dev/null @@ -1,9 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -{ - "ok" : 0, - "errmsg" : "replSetInitiate quorum check failed because not all proposed set members responded affirmatively: some-db-2.some-db.default.svc.cluster.local:27017 failed with Connection refused", - "code" : 74, - "codeName" : "NodeNotFound" -} diff --git a/tests/fixtures/mongo_responses/initiate-ok.json b/tests/fixtures/mongo_responses/initiate-ok.json new file mode 100644 index 0000000..b8a4e62 --- /dev/null +++ b/tests/fixtures/mongo_responses/initiate-ok.json @@ -0,0 +1 @@ +{"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": 
{"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", "$type": "00"}, "keyId": 0}}} \ No newline at end of file diff --git a/tests/fixtures/mongo_responses/initiate-ok.txt b/tests/fixtures/mongo_responses/initiate-ok.txt deleted file mode 100644 index 01aa6dd..0000000 --- a/tests/fixtures/mongo_responses/initiate-ok.txt +++ /dev/null @@ -1,14 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -{ - "ok" : 1, - "operationTime" : Timestamp(1528365094, 1), - "$clusterTime" : { - "clusterTime" : Timestamp(1528365094, 1), - "signature" : { - "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="), - "keyId" : NumberLong(0) - } - } -} diff --git a/tests/fixtures/mongo_responses/replica-status-error.txt b/tests/fixtures/mongo_responses/replica-status-error.txt deleted file mode 100644 index 1cb4c2e..0000000 --- a/tests/fixtures/mongo_responses/replica-status-error.txt +++ /dev/null @@ -1,7 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -2018-06-07T09:51:21.877+0000 W NETWORK [thread1] Failed to connect to 127.0.0.1:27017, in(checking socket for error after poll), reason: Connection refused -2018-06-07T09:51:21.878+0000 E QUERY [thread1] Error: couldn't connect to server localhost:27017, connection attempt failed : -connect@src/mongo/shell/mongo.js:251:13 -@(connect):1:6 -exception: connect failed diff --git a/tests/fixtures/mongo_responses/replica-status-not-initialized.txt b/tests/fixtures/mongo_responses/replica-status-not-initialized.txt deleted file mode 100644 index 98f177d..0000000 --- a/tests/fixtures/mongo_responses/replica-status-not-initialized.txt +++ /dev/null @@ -1,10 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -{ - "info" : "run rs.initiate(...) 
if not yet done for the set", - "ok" : 0, - "errmsg" : "no replset config has been received", - "code" : 94, - "codeName" : "NotYetInitialized" -} diff --git a/tests/fixtures/mongo_responses/replica-status-ok.json b/tests/fixtures/mongo_responses/replica-status-ok.json new file mode 100644 index 0000000..e0ebe5f --- /dev/null +++ b/tests/fixtures/mongo_responses/replica-status-ok.json @@ -0,0 +1,179 @@ +{ + "set": "mongotest", + "date": { + "$date": 1549871357279 + }, + "myState": 1, + "term": 1, + "syncingTo": "", + "syncSourceHost": "", + "syncSourceId": -1, + "heartbeatIntervalMillis": 2000, + "optimes": { + "lastCommittedOpTime": { + "ts": { + "$timestamp": { + "t": 0, + "i": 0 + } + }, + "t": -1 + }, + "appliedOpTime": { + "ts": { + "$timestamp": { + "t": 1549871355, + "i": 1 + } + }, + "t": -1 + }, + "durableOpTime": { + "ts": { + "$timestamp": { + "t": 1549871355, + "i": 1 + } + }, + "t": -1 + } + }, + "lastStableCheckpointTimestamp": { + "$timestamp": { + "t": 0, + "i": 0 + } + }, + "members": [ + { + "_id": 0, + "name": "c87cdec35e3c:27017", + "health": 1.0, + "state": 1, + "stateStr": "PRIMARY", + "uptime": 90, + "optime": { + "ts": { + "$timestamp": { + "t": 1549871355, + "i": 1 + } + }, + "t": -1 + }, + "optimeDate": { + "$date": 1549871355000 + }, + "syncingTo": "", + "syncSourceHost": "", + "syncSourceId": -1, + "infoMessage": "", + "electionTime": { + "$timestamp": { + "t": 1549871355, + "i": 2 + } + }, + "electionDate": { + "$date": 1549871355000 + }, + "configVersion": 1, + "self": true, + "lastHeartbeatMessage": "" + }, + { + "_id": 0, + "name": "c87cdec35e3d:27017", + "health": 1.0, + "state": 1, + "stateStr": "SECONDARY", + "uptime": 90, + "optime": { + "ts": { + "$timestamp": { + "t": 1549871355, + "i": 1 + } + }, + "t": -1 + }, + "optimeDate": { + "$date": 1549871355000 + }, + "syncingTo": "", + "syncSourceHost": "", + "syncSourceId": -1, + "infoMessage": "", + "electionTime": { + "$timestamp": { + "t": 1549871355, + "i": 2 + } + }, + "electionDate": { + "$date": 1549871355000 + }, + "configVersion": 1, + "self": true, + "lastHeartbeatMessage": "" + }, + { + "_id": 0, + "name": "c87cdec35e3e:27017", + "health": 1.0, + "state": 1, + "stateStr": "SECONDARY", + "uptime": 90, + "optime": { + "ts": { + "$timestamp": { + "t": 1549871355, + "i": 1 + } + }, + "t": -1 + }, + "optimeDate": { + "$date": 1549871355000 + }, + "syncingTo": "", + "syncSourceHost": "", + "syncSourceId": -1, + "infoMessage": "", + "electionTime": { + "$timestamp": { + "t": 1549871355, + "i": 2 + } + }, + "electionDate": { + "$date": 1549871355000 + }, + "configVersion": 1, + "self": true, + "lastHeartbeatMessage": "" + } + ], + "ok": 1.0, + "operationTime": { + "$timestamp": { + "t": 1549871355, + "i": 1 + } + }, + "$clusterTime": { + "clusterTime": { + "$timestamp": { + "t": 1549871355, + "i": 2 + } + }, + "signature": { + "hash": { + "$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "$type": "00" + }, + "keyId": 0 + } + } +} diff --git a/tests/fixtures/mongo_responses/replica-status-ok.txt b/tests/fixtures/mongo_responses/replica-status-ok.txt deleted file mode 100644 index 1b09d83..0000000 --- a/tests/fixtures/mongo_responses/replica-status-ok.txt +++ /dev/null @@ -1,102 +0,0 @@ -MongoDB shell version v3.6.4 -connecting to: mongodb://localhost:27017/admin -MongoDB server version: 3.6.4 -{ - "set" : "some-db", - "date" : ISODate("2018-06-07T09:13:07.663Z"), - "myState" : 1, - "term" : NumberLong(1), - "heartbeatIntervalMillis" : NumberLong(2000), - "optimes" : { - "lastCommittedOpTime" : { - "ts" : 
Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "readConcernMajorityOpTime" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "appliedOpTime" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "durableOpTime" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - } - }, - "members" : [ - { - "_id" : 0, - "name" : "some-db-0.some-db.default.svc.cluster.local:27017", - "health" : 1, - "state" : 1, - "stateStr" : "PRIMARY", - "uptime" : 210, - "optime" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2018-06-07T09:13:03Z"), - "electionTime" : Timestamp(1528362622, 1), - "electionDate" : ISODate("2018-06-07T09:10:22Z"), - "configVersion" : 1, - "self" : true - }, - { - "_id" : 1, - "name" : "some-db-1.some-db.default.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 178, - "optime" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2018-06-07T09:13:03Z"), - "optimeDurableDate" : ISODate("2018-06-07T09:13:03Z"), - "lastHeartbeat" : ISODate("2018-06-07T09:13:07.162Z"), - "lastHeartbeatRecv" : ISODate("2018-06-07T09:13:07.265Z"), - "pingMs" : NumberLong(0), - "syncingTo" : "some-db-2.some-db.default.svc.cluster.local:27017", - "configVersion" : 1 - }, - { - "_id" : 2, - "name" : "some-db-2.some-db.default.svc.cluster.local:27017", - "health" : 1, - "state" : 2, - "stateStr" : "SECONDARY", - "uptime" : 178, - "optime" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "optimeDurable" : { - "ts" : Timestamp(1528362783, 1), - "t" : NumberLong(1) - }, - "optimeDate" : ISODate("2018-06-07T09:13:03Z"), - "optimeDurableDate" : ISODate("2018-06-07T09:13:03Z"), - "lastHeartbeat" : ISODate("2018-06-07T09:13:06.564Z"), - "lastHeartbeatRecv" : ISODate("2018-06-07T09:13:06.760Z"), - "pingMs" : NumberLong(6), - "syncingTo" : "some-db-0.some-db.default.svc.cluster.local:27017", - "configVersion" : 1 - } - ], - "ok" : 1, - "operationTime" : Timestamp(1528362783, 1), - "$clusterTime" : { - "clusterTime" : Timestamp(1528362785, 0), - "signature" : { - "hash" : BinData(0,"AAAAAAAAAAAAAAAAAAAAAAAAAAA="), - "keyId" : NumberLong(0) - } - } -} diff --git a/tests/helpers/TestAdminSecretChecker.py b/tests/helpers/TestAdminSecretChecker.py index 260090b..7323eb7 100644 --- a/tests/helpers/TestAdminSecretChecker.py +++ b/tests/helpers/TestAdminSecretChecker.py @@ -38,7 +38,7 @@ def test_createResource(self, b64encode_mock): result = self.checker.createResource(self.cluster_object) self.assertEqual(self.kubernetes_service.createSecret.return_value, result) self.kubernetes_service.createSecret.assert_called_once_with( - self.secret_name, "default", {"username": "root", "password": "random-password"} + self.secret_name, self.cluster_object.metadata.namespace, {"username": "root", "password": "random-password"} ) @patch("mongoOperator.helpers.AdminSecretChecker.b64encode") @@ -47,7 +47,7 @@ def test_updateResource(self, b64encode_mock): result = self.checker.updateResource(self.cluster_object) self.assertEqual(self.kubernetes_service.updateSecret.return_value, result) self.kubernetes_service.updateSecret.assert_called_once_with( - self.secret_name, "default", {"username": "root", "password": "random-password"} + self.secret_name, self.cluster_object.metadata.namespace, {"username": "root", "password": "random-password"} ) def 
test_deleteResource(self): diff --git a/tests/helpers/TestBackupChecker.py b/tests/helpers/TestBackupChecker.py index 73132be..f3bf27c 100644 --- a/tests/helpers/TestBackupChecker.py +++ b/tests/helpers/TestBackupChecker.py @@ -37,7 +37,7 @@ def test_backupIfNeeded_check_if_needed(self, backup_mock): # this backup is executed every hour at 0 minutes. self.assertEqual("0 * * * *", self.cluster_object.spec.backups.cron) - key = ('mongo-cluster', 'default') + key = ('mongo-cluster', self.cluster_object.metadata.namespace) expected_calls = [] current_date = datetime(2018, 2, 28, 12, 30, 0) @@ -81,14 +81,16 @@ def test_backupIfNeeded_check_if_needed(self, backup_mock): @patch("mongoOperator.helpers.BackupChecker.check_output") def test_backup(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): current_date = datetime(2018, 2, 28, 14, 0, 0) - expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" + expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\ + "-mongo-cluster-2018-02-28_140000.archive.gz" self.checker.backup(self.cluster_object, current_date) - self.assertEqual([call.getSecret('storage-serviceaccount', 'default')], self.kubernetes_service.mock_calls) + self.assertEqual([call.getSecret('storage-serviceaccount', self.cluster_object.metadata.namespace)], self.kubernetes_service.mock_calls) subprocess_mock.assert_called_once_with([ - 'mongodump', '--host', 'mongo-cluster-2.mongo-cluster.default.svc.cluster.local', '--gzip', + 'mongodump', '--host', 'mongo-cluster-2.mongo-cluster.' + self.cluster_object.metadata.namespace + + '.svc.cluster.local', '--gzip', '--archive=/tmp/' + expected_backup_name ]) @@ -116,8 +118,10 @@ def test_backup_mongo_error(self, subprocess_mock): with self.assertRaises(SubprocessError) as context: self.checker.backup(self.cluster_object, current_date) - self.assertEqual("Could not backup 'mongo-cluster-2.mongo-cluster.default.svc.cluster.local' to " - "'/tmp/mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz'. " + self.assertEqual("Could not backup 'mongo-cluster-2.mongo-cluster." + self.cluster_object.metadata.namespace + + ".svc.cluster.local' to " + "'/tmp/mongodb-backup-" + self.cluster_object.metadata.namespace + + "-mongo-cluster-2018-02-28_140000.archive.gz'. 
" "Return code: 3\n stderr: 'error'\n stdout: 'output'", str(context.exception)) diff --git a/tests/helpers/TestBaseResourceChecker.py b/tests/helpers/TestBaseResourceChecker.py index 04abc7d..3af1713 100644 --- a/tests/helpers/TestBaseResourceChecker.py +++ b/tests/helpers/TestBaseResourceChecker.py @@ -56,22 +56,25 @@ def test_cleanResources_found(self): self.kubernetes_service.getMongoObject.return_value = self.cluster_object self.checker.listResources = MagicMock(return_value=[self.cluster_object]) self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', 'default')], self.kubernetes_service.mock_calls) + self.assertEqual([call.getMongoObject('mongo-cluster', self.cluster_object.metadata.namespace)], + self.kubernetes_service.mock_calls) def test_cleanResources_not_found(self): self.kubernetes_service.getMongoObject.side_effect = ApiException(404) self.checker.listResources = MagicMock(return_value=[self.cluster_object]) self.checker.deleteResource = MagicMock() self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', 'default')], self.kubernetes_service.mock_calls) - self.checker.deleteResource.assert_called_once_with('mongo-cluster', 'default') + self.assertEqual([call.getMongoObject('mongo-cluster', self.cluster_object.metadata.namespace)], + self.kubernetes_service.mock_calls) + self.checker.deleteResource.assert_called_once_with('mongo-cluster', self.cluster_object.metadata.namespace) def test_cleanResources_error(self): self.kubernetes_service.getMongoObject.side_effect = ApiException(400) self.checker.listResources = MagicMock(return_value=[self.cluster_object]) with self.assertRaises(ApiException): self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', 'default')], self.kubernetes_service.mock_calls) + self.assertEqual([call.getMongoObject('mongo-cluster', self.cluster_object.metadata.namespace)], + self.kubernetes_service.mock_calls) def test_listResources(self): with self.assertRaises(NotImplementedError): diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py index 534e389..3ddfc4b 100644 --- a/tests/helpers/TestClusterChecker.py +++ b/tests/helpers/TestClusterChecker.py @@ -4,10 +4,11 @@ from copy import deepcopy from unittest import TestCase from unittest.mock import patch, call - from mongoOperator.helpers.ClusterChecker import ClusterChecker +from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from tests.test_utils import getExampleClusterDefinition +from bson.json_util import loads class TestClusterChecker(TestCase): @@ -23,8 +24,8 @@ def setUp(self): self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) def _getMongoFixture(self, name): - with open("tests/fixtures/mongo_responses/{}.txt".format(name)) as f: - return f.read() + with open("tests/fixtures/mongo_responses/{}.json".format(name), "rb") as f: + return loads(f.read()) def test___init__(self): self.assertEqual(self.kubernetes_service, self.checker.kubernetes_service) @@ -52,16 +53,18 @@ def test_checkExistingClusters_bad_format(self): self.assertEqual(expected, self.kubernetes_service.mock_calls) self.assertEqual({}, self.checker.cluster_versions) + @patch("mongoOperator.services.MongoService.MongoClient") @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") - def test_checkExistingClusters(self, backup_mock): - 
self.checker.cluster_versions[("mongo-cluster", "default")] = "100" # checkCluster will assume cached version + def test_checkExistingClusters(self, backup_mock, mongoclient_mock): + # checkCluster will assume cached version + self.checker.cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" self.kubernetes_service.listMongoObjects.return_value = {"items": [self.cluster_dict]} - self.kubernetes_service.execInPod.return_value = self._getMongoFixture("replica-status-ok") + mongoclient_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") self.checker.checkExistingClusters() - self.assertEqual({("mongo-cluster", "default"): "100"}, self.checker.cluster_versions) - expected = [call.listMongoObjects(), - call.execInPod('mongodb', 'mongo-cluster-0', 'default', - ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'])] + self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"}, + self.checker.cluster_versions) + expected = [call.listMongoObjects()] + print(repr(self.kubernetes_service.mock_calls)) self.assertEqual(expected, self.kubernetes_service.mock_calls) backup_mock.assert_called_once_with(self.cluster_object) @@ -120,32 +123,41 @@ def test_collectGarbage(self, list_mock, clean_mock): self.assertEqual([call()] * 3, clean_mock.mock_calls) self.assertEqual([], self.kubernetes_service.mock_calls) # k8s is not called because we mocked everything + @patch("mongoOperator.services.MongoService.MongoClient") @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") - def test_checkCluster_same_version(self, backup_mock): - self.checker.cluster_versions[("mongo-cluster", "default")] = "100" # checkCluster will assume cached version - self.kubernetes_service.execInPod.return_value = self._getMongoFixture("replica-status-ok") + def test_checkCluster_same_version(self, backup_mock, mongoclient_mock): + # checkCluster will assume cached version + self.checker.cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" + mongoclient_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") + self.checker.checkCluster(self.cluster_object) - self.assertEqual({("mongo-cluster", "default"): "100"}, self.checker.cluster_versions) - expected = [call.execInPod('mongodb', 'mongo-cluster-0', 'default', - ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'])] - self.assertEqual(expected, self.kubernetes_service.mock_calls) + self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"}, + self.checker.cluster_versions) + + # expected = [ + # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), + # call().admin.command('replSetGetStatus') + # ] + # print("actual:", repr(mongoclient_mock.mock_calls)) + # print("expected:", repr(expected)) + # self.assertEqual(expected, mongoclient_mock.mock_calls) backup_mock.assert_called_once_with(self.cluster_object) + @patch("mongoOperator.services.MongoService.MongoClient") @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") @patch("mongoOperator.helpers.MongoResources.MongoResources.createCreateAdminCommand") @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.checkResource") - def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock): - self.checker.cluster_versions[("mongo-cluster", "default")] = "50" - self.kubernetes_service.execInPod.side_effect = 
(self._getMongoFixture("replica-status-ok"), - self._getMongoFixture("createUser-exists")) + def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock, mongoclient_mock): + admin_mock.return_value = "createUser", "foo", {} + self.checker.cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "50" + mongoclient_mock.return_value.admin.command.side_effect = (self._getMongoFixture("replica-status-ok"), + self._getMongoFixture("createUser-ok")) self.checker.checkCluster(self.cluster_object) - self.assertEqual({("mongo-cluster", "default"): "100"}, self.checker.cluster_versions) - expected = [call.execInPod('mongodb', 'mongo-cluster-0', 'default', - ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()']), - call.getSecret('mongo-cluster-admin-credentials', 'default'), - call.execInPod('mongodb', 'mongo-cluster-0', 'default', [ - 'mongo', 'localhost:27017/admin', '--eval', admin_mock.return_value - ])] + self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"}, + self.checker.cluster_versions) + expected = [call.getSecret('mongo-cluster-admin-credentials', self.cluster_object.metadata.namespace)] self.assertEqual(expected, self.kubernetes_service.mock_calls) self.assertEqual([call(self.cluster_object)] * 3, check_mock.mock_calls) backup_mock.assert_called_once_with(self.cluster_object) + + self.assertEqual([call(self.kubernetes_service.getSecret())], admin_mock.mock_calls) diff --git a/tests/helpers/TestMongoMonitoring.py b/tests/helpers/TestMongoMonitoring.py new file mode 100644 index 0000000..1bc1b99 --- /dev/null +++ b/tests/helpers/TestMongoMonitoring.py @@ -0,0 +1,50 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- + +from mongoOperator.helpers.MongoMonitoring import CommandLogger, TopologyLogger, ServerLogger, HeartbeatLogger + +from unittest import TestCase +from unittest.mock import MagicMock, patch, call + + +class CommandEventMock: + """ + Mock implementation of a CommandEvent. + """ + command_name = "foo" + request_id = 1 + connection_id = 1 + duration_micros = 10000 + + +class ServerDescriptionEventMock: + server_type = "foo" + server_type_name = "foo" + + +class ServerEventMock: + """ + Mock implementation of a ServerEvent. 
+ """ + server_address = "localhost" + topology_id = 1 + previous_description = ServerDescriptionEventMock() + new_description = ServerDescriptionEventMock() + + +class TestRestoreHelper(TestCase): + + def setUp(self): + return + + def test_commandLogger(self): + commandlogger = CommandLogger() + commandlogger.started(event=CommandEventMock()) + commandlogger.succeeded(event=CommandEventMock()) + commandlogger.failed(event=CommandEventMock()) + + def test_serverLogger(self): + serverlogger = ServerLogger() + serverlogger.opened(event=ServerEventMock()) + serverlogger.description_changed(event=ServerEventMock()) diff --git a/tests/helpers/TestMongoResources.py b/tests/helpers/TestMongoResources.py deleted file mode 100644 index cfed625..0000000 --- a/tests/helpers/TestMongoResources.py +++ /dev/null @@ -1,126 +0,0 @@ -# Copyright (c) 2018 Ultimaker -# !/usr/bin/env python -# -*- coding: utf-8 -*- -from unittest import TestCase - -from mongoOperator.helpers.MongoResources import MongoResources - - -class TestMongoResources(TestCase): - # note: most methods are tested in TestMongoService.py - - def test_parseMongoResponse_ok(self): - with open("tests/fixtures/mongo_responses/replica-status-ok.txt") as f: - response = MongoResources.parseMongoResponse(f.read()) - expected = { - '$clusterTime': { - 'clusterTime': 1528362785.0, - 'signature': { - 'hash': 'AAAAAAAAAAAAAAAAAAAAAAAAAAA=', - 'keyId': 0 - } - }, - 'date': '2018-06-07T09:13:07.663Z', - 'heartbeatIntervalMillis': 2000, - 'members': [{ - '_id': 0, - 'configVersion': 1, - 'electionDate': '2018-06-07T09:10:22Z', - 'electionTime': 1528362622.1, - 'health': 1, - 'name': 'some-db-0.some-db.default.svc.cluster.local:27017', - 'optime': {'t': 1, 'ts': 1528362783.1}, - 'optimeDate': '2018-06-07T09:13:03Z', - 'self': True, - 'state': 1, - 'stateStr': 'PRIMARY', - 'uptime': 210 - }, { - '_id': 1, - 'configVersion': 1, - 'health': 1, - 'lastHeartbeat': '2018-06-07T09:13:07.162Z', - 'lastHeartbeatRecv': '2018-06-07T09:13:07.265Z', - 'name': 'some-db-1.some-db.default.svc.cluster.local:27017', - 'optime': {'t': 1, 'ts': 1528362783.1}, - 'optimeDate': '2018-06-07T09:13:03Z', - 'optimeDurable': {'t': 1, 'ts': 1528362783.1}, - 'optimeDurableDate': '2018-06-07T09:13:03Z', - 'pingMs': 0, - 'state': 2, - 'stateStr': 'SECONDARY', - 'syncingTo': 'some-db-2.some-db.default.svc.cluster.local:27017', - 'uptime': 178 - }, { - '_id': 2, - 'configVersion': 1, - 'health': 1, - 'lastHeartbeat': '2018-06-07T09:13:06.564Z', - 'lastHeartbeatRecv': '2018-06-07T09:13:06.760Z', - 'name': 'some-db-2.some-db.default.svc.cluster.local:27017', - 'optime': {'t': 1, 'ts': 1528362783.1}, - 'optimeDate': '2018-06-07T09:13:03Z', - 'optimeDurable': {'t': 1, 'ts': 1528362783.1}, - 'optimeDurableDate': '2018-06-07T09:13:03Z', - 'pingMs': 6, - 'state': 2, - 'stateStr': 'SECONDARY', - 'syncingTo': 'some-db-0.some-db.default.svc.cluster.local:27017', - 'uptime': 178 - }], - 'myState': 1, - 'ok': 1, - 'operationTime': 1528362783.1, - 'optimes': { - 'appliedOpTime': {'t': 1, 'ts': 1528362783.1}, - 'durableOpTime': {'t': 1, 'ts': 1528362783.1}, - 'lastCommittedOpTime': {'t': 1, 'ts': 1528362783.1}, - 'readConcernMajorityOpTime': {'t': 1, 'ts': 1528362783.1} - }, - 'set': 'some-db', - 'term': 1 - } - self.assertEqual(expected, response) - - def test_parseMongoResponse_not_initialized(self): - with open("tests/fixtures/mongo_responses/replica-status-not-initialized.txt") as f: - response = MongoResources.parseMongoResponse(f.read()) - expected = { - "info": "run rs.initiate(...) 
if not yet done for the set", - "ok": 0, - "errmsg": "no replset config has been received", - "code": 94, - "codeName": "NotYetInitialized" - } - self.assertEqual(expected, response) - - def test_parseMongoResponse_error(self): - with open("tests/fixtures/mongo_responses/replica-status-error.txt") as f: - with self.assertRaises(ValueError) as context: - MongoResources.parseMongoResponse(f.read()) - self.assertEqual("connect failed", str(context.exception)) - - def test_parseMongoResponse_empty(self): - self.assertEqual({}, MongoResources.parseMongoResponse('')) - - def test_parseMongoResponse_only_version(self): - self.assertEqual({}, MongoResources.parseMongoResponse("MongoDB shell version v3.6.4\n")) - - def test_parseMongoResponse_version_twice(self): - self.assertEqual({}, MongoResources.parseMongoResponse( - "MongoDB shell version v3.6.4\n" - "connecting to: mongodb://localhost:27017/admin\n" - "MongoDB server version: 3.6.4\n" - )) - - def test_parseMongoResponse_bad_json(self): - with open("tests/fixtures/mongo_responses/replica-status-ok.txt") as f: - with self.assertRaises(ValueError) as context: - MongoResources.parseMongoResponse(f.read().replace("Timestamp", "TimeStamp")) - self.assertIn("Cannot parse JSON because of error", str(context.exception)) - - def test_parseMongoResponse_user_created(self): - with open("tests/fixtures/mongo_responses/createUser-ok.txt") as f: - response = MongoResources.parseMongoResponse(f.read()) - expected = {"user": "root", "roles": [{"role": "root", "db": "admin"}]} - self.assertEqual(expected, response) diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py index fc9b721..4b8631d 100644 --- a/tests/helpers/TestRestoreHelper.py +++ b/tests/helpers/TestRestoreHelper.py @@ -7,13 +7,12 @@ from kubernetes.client import V1Secret from subprocess import CalledProcessError, SubprocessError -from datetime import datetime from unittest import TestCase from unittest.mock import MagicMock, patch, call from mongoOperator.helpers.RestoreHelper import RestoreHelper from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration -from tests.test_utils import getExampleClusterDefinitionWithRestore +from tests.test_utils import getExampleClusterDefinitionWithRestore, getExampleClusterDefinition class MockBlob: @@ -23,6 +22,7 @@ class MockBlob: name = "somebackupfile.gz" +@patch("mongoOperator.helpers.RestoreHelper.sleep", MagicMock()) class TestRestoreHelper(TestCase): def setUp(self): @@ -34,6 +34,12 @@ def setUp(self): self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode()) self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials}) + self.expected_cluster_members = [ + "mongo-cluster-0.mongo-cluster." + self.cluster_object.metadata.namespace + ".svc.cluster.local", + "mongo-cluster-1.mongo-cluster." + self.cluster_object.metadata.namespace + ".svc.cluster.local", + "mongo-cluster-2.mongo-cluster." 
+ self.cluster_object.metadata.namespace + ".svc.cluster.local" + ] + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restore") @@ -43,72 +49,82 @@ def test_restoreIfNeeded(self, restore_mock, gcs_service_mock, storage_mock): self.restore_helper.restoreIfNeeded(self.cluster_object) - self.assertEqual([call.getSecret('storage-serviceaccount', 'default')], self.kubernetes_service.mock_calls) + self.assertEqual([call.getSecret("storage-serviceaccount", self.cluster_object.metadata.namespace)], self.kubernetes_service.mock_calls) + + expected_service_call = call.from_service_account_info({"user": "password"}) + self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) + + get_bucket_mock.assert_called_once_with("ultimaker-mongo-backups") + + restore_mock.assert_called_once_with(self.cluster_object, "somebackupfile.gz") + + # Again, with no needed restore + restore_mock.reset_mock() + + self.cluster_dict = getExampleClusterDefinition() + print(repr(self.cluster_dict)) + self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) + + self.restore_helper.restoreIfNeeded(self.cluster_object) + print(restore_mock.mock_calls) + assert not restore_mock.called, "restore_mock should not have been called" + + @patch("mongoOperator.helpers.RestoreHelper.os") + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\ + "-mongo-cluster-2018-02-28_140000.archive.gz" + + self.restore_helper.restore(self.cluster_object, expected_backup_name) + + self.assertEqual([call.getSecret("storage-serviceaccount", self.cluster_object.metadata.namespace)], + self.kubernetes_service.mock_calls) - expected_service_call = call.from_service_account_info({'user': 'password'}) + subprocess_mock.assert_called_once_with([ + "mongorestore", "--host", ",".join(self.expected_cluster_members), "--gzip", + "--archive=/tmp/" + expected_backup_name + ]) + + expected_service_call = call.from_service_account_info({"user": "password"}) self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) - get_bucket_mock.assert_called_once_with('ultimaker-mongo-backups') - - restore_mock.assert_called_once_with(self.cluster_object, 'somebackupfile.gz') - - # @patch("mongoOperator.helpers.RestoreHelper.os") - # @patch("mongoOperator.helpers.RestoreHelper.StorageClient") - # @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") - # @patch("mongoOperator.helpers.RestoreHelper.check_output") - # def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): - # current_date = datetime(2018, 2, 28, 14, 0, 0) - # expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" - # - # self.restore_helper.restore(self.cluster_object, expected_backup_name) - # - # self.assertEqual([call.getSecret('storage-serviceaccount', 'default')], self.kubernetes_service.mock_calls) - # - # subprocess_mock.assert_called_once_with([ - # 'mongorestore', '--host', 'mongo-cluster-2.mongo-cluster.default.svc.cluster.local', '--gzip', - # '--archive', '/tmp/' + expected_backup_name - # ]) - # - # expected_service_call = 
call.from_service_account_info({'user': 'password'}) - # self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) - # - # expected_storage_calls = [ - # call(gcs_service_mock.from_service_account_info.return_value.project_id, - # gcs_service_mock.from_service_account_info.return_value), - # call().bucket('ultimaker-mongo-backups'), - # call().bucket().blob('test-backups/' + expected_backup_name), - # call().bucket().blob().download_to_filename('/tmp/' + expected_backup_name), - # ] - # self.assertEqual(expected_storage_calls, storage_mock.mock_calls) - # - # expected_os_call = call.remove('/tmp/' + expected_backup_name) - # self.assertEqual([expected_os_call], os_mock.mock_calls) - # - # @patch("mongoOperator.helpers.RestoreHelper.os") - # @patch("mongoOperator.helpers.RestoreHelper.StorageClient") - # @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") - # @patch("mongoOperator.helpers.RestoreHelper.check_output") - # def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): - # subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") - # expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" - # - # current_date = datetime(2018, 2, 28, 14, 0, 0) - # - # with self.assertRaises(SubprocessError) as context: - # self.restore_helper.restore(self.cluster_object, expected_backup_name) - # - # self.assertEqual("Could not restore " - # "'" + expected_backup_name + "'" - # " to " - # "'mongo-cluster-2.mongo-cluster.default.svc.cluster.local'. " - # "Return code: 3\n stderr: 'error'\n stdout: 'output'", - # str(context.exception)) - # - # self.assertEqual(1, subprocess_mock.call_count) - # - # @patch("mongoOperator.helpers.RestoreHelper.check_output") - # def test_restore_gcs_bad_credentials(self, subprocess_mock): - # expected_backup_name = "mongodb-backup-default-mongo-cluster-2018-02-28_140000.archive.gz" - # with self.assertRaises(ValueError) as context: - # self.restore_helper.restore(self.cluster_object, expected_backup_name) - # self.assertIn("Service account info was not in the expected format", str(context.exception)) + expected_storage_calls = [ + call(gcs_service_mock.from_service_account_info.return_value.project_id, + gcs_service_mock.from_service_account_info.return_value), + call().get_bucket("ultimaker-mongo-backups"), + call().get_bucket().blob("test-backups/" + expected_backup_name), + call().get_bucket().blob().download_to_filename("/tmp/" + expected_backup_name), + ] + self.assertEqual(expected_storage_calls, storage_mock.mock_calls) + + expected_os_call = call.remove("/tmp/" + expected_backup_name) + self.assertEqual([expected_os_call], os_mock.mock_calls) + + @patch("mongoOperator.helpers.RestoreHelper.os") + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") + expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\ + "-mongo-cluster-2018-02-28_140000.archive.gz" + + with self.assertRaises(SubprocessError) as context: + self.restore_helper.restore(self.cluster_object, expected_backup_name) + + self.assertEqual("Could not restore " + "'" + expected_backup_name + "' " + "after 4 retries!", + str(context.exception)) + + 
self.assertEqual(4, subprocess_mock.call_count) + + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore_gcs_bad_credentials(self, subprocess_mock): + expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\ + "-mongo-cluster-2018-02-28_140000.archive.gz" + with self.assertRaises(ValueError) as context: + self.restore_helper.restore(self.cluster_object, expected_backup_name) + self.assertIn("Service account info was not in the expected format", str(context.exception)) diff --git a/tests/models/TestV1MongoClusterConfiguration.py b/tests/models/TestV1MongoClusterConfiguration.py index f0c40b5..2e1dd43 100644 --- a/tests/models/TestV1MongoClusterConfiguration.py +++ b/tests/models/TestV1MongoClusterConfiguration.py @@ -62,7 +62,7 @@ def test_equals(self): def test_example_repr(self): expected = \ "V1MongoClusterConfiguration(api_version=operators.ultimaker.com/v1, kind=Mongo, " \ - "metadata={'name': 'mongo-cluster', 'namespace': 'default'}, " \ + "metadata={'name': 'mongo-cluster', 'namespace': '" + self.cluster_object.metadata.namespace + "'}, " \ "spec={'backups': {'cron': '0 * * * *', 'gcs': {'bucket': 'ultimaker-mongo-backups', " \ "'prefix': 'test-backups', 'service_account': {'secret_key_ref': {'key': 'json', " \ "'name': 'storage-serviceaccount'}}}}, 'mongodb': {'cpu_limit': '100m', 'memory_limit': '64Mi', " \ diff --git a/tests/services/TestKubernetesService.py b/tests/services/TestKubernetesService.py index 23b1efd..f1dd991 100644 --- a/tests/services/TestKubernetesService.py +++ b/tests/services/TestKubernetesService.py @@ -84,8 +84,8 @@ def _createMeta(self, name: str) -> V1ObjectMeta: def _createResourceLimits(self) -> V1ResourceRequirements: return V1ResourceRequirements( - limits = {"cpu": self.cpu_limit, "memory": self.memory_limit}, - requests = {"cpu": self.cpu_limit, "memory": self.memory_limit} + limits={"cpu": self.cpu_limit, "memory": self.memory_limit}, + requests={"cpu": self.cpu_limit, "memory": self.memory_limit} ) def test___init__(self, client_mock): @@ -472,14 +472,3 @@ def test_deleteStatefulSet(self, client_mock): ] self.assertEqual(expected_calls, client_mock.mock_calls) self.assertEqual(client_mock.AppsV1beta1Api().delete_namespaced_stateful_set.return_value, result) - - @patch("mongoOperator.services.KubernetesService.stream") - def test_execInPod(self, stream_mock, client_mock): - service = KubernetesService() - client_mock.reset_mock() - result = service.execInPod("container", "pod_name", self.namespace, "ls") - stream_mock.assert_called_once_with(client_mock.CoreV1Api.return_value.connect_get_namespaced_pod_exec, - 'pod_name', 'default', command='ls', container='container', - stderr=True, stdin=False, stdout=True, tty=False) - self.assertEqual(stream_mock.return_value, result) - self.assertEqual([], client_mock.mock_calls) diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py index a748e17..6586852 100644 --- a/tests/services/TestMongoService.py +++ b/tests/services/TestMongoService.py @@ -10,11 +10,13 @@ from unittest import TestCase from unittest.mock import MagicMock, patch, call +from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService from mongoOperator.services.MongoService import MongoService from tests.test_utils import getExampleClusterDefinition - +from bson.json_util import loads +from 
pymongo.errors import OperationFailure, ConnectionFailure @patch("mongoOperator.services.MongoService.sleep", MagicMock()) class TestMongoService(TestCase): @@ -23,9 +25,16 @@ class TestMongoService(TestCase): def setUp(self): super().setUp() self.kubernetes_service: Union[MagicMock, KubernetesService] = MagicMock() + + self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode()) + self.kubernetes_service.getSecret.return_value = V1Secret( metadata=V1ObjectMeta(name="mongo-cluster-admin-credentials", namespace="default"), - data={"password": b64encode(b"random-password"), "username": b64encode(b"root")}, + data={ + "password": b64encode(b"random-password"), + "username": b64encode(b"root"), + "json": self.dummy_credentials + }, ) self.service = MongoService(self.kubernetes_service) @@ -40,263 +49,305 @@ def setUp(self): "codeName": "NotYetInitialized" } - self.initiate_ok_response = { - "ok": 1, - "operationTime": 1528365094.1, - "$clusterTime": { - "clusterTime": 1528365094.1, - "signature": { - "hash": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", - "keyId": 0 - } - } - } + self.initiate_ok_response = loads(''' + {"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": + {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "$type": "00"}, "keyId": 0}}} + ''') - self.initiate_not_found_response = { - "ok": 0, - "errmsg": "replSetInitiate quorum check failed because not all proposed set members responded " - "affirmatively: some-db-2.some-db.default.svc.cluster.local:27017 failed with Connection refused", - "code": 74, - "codeName": "NodeNotFound" - } + self.initiate_not_found_response = loads(''' + {"ok": 2, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": + {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "$type": "00"}, "keyId": 0}}} + ''') - self.expected_cluster_config = json.dumps({ + self.expected_cluster_config = { "_id": "mongo-cluster", "version": 1, "members": [ - {"_id": 0, "host": "mongo-cluster-0.mongo-cluster.default.svc.cluster.local"}, - {"_id": 1, "host": "mongo-cluster-1.mongo-cluster.default.svc.cluster.local"}, - {"_id": 2, "host": "mongo-cluster-2.mongo-cluster.default.svc.cluster.local"} + {"_id": 0, "host": "mongo-cluster-0.mongo-cluster." + self.cluster_object.metadata.namespace + + ".svc.cluster.local"}, + {"_id": 1, "host": "mongo-cluster-1.mongo-cluster." + self.cluster_object.metadata.namespace + + ".svc.cluster.local"}, + {"_id": 2, "host": "mongo-cluster-2.mongo-cluster." 
+ self.cluster_object.metadata.namespace + + ".svc.cluster.local"} ] - }) + } - self.expected_user_create = """ - admin = db.getSiblingDB("admin") - admin.createUser({ - user: "root", pwd: "random-password", - roles: [ { role: "root", db: "admin" } ] - }) - admin.auth("root", "random-password") - """ + self.expected_user_create = { + "pwd": "random-password", + "roles": [{"role": "root", "db": "admin"}] + } def _getFixture(self, name): - with open("tests/fixtures/mongo_responses/{}.txt".format(name)) as f: - return f.read() - - def test__execInPod(self): - self.kubernetes_service.execInPod.return_value = self._getFixture("replica-status-not-initialized") - result = self.service._execInPod(0, "cluster", "default", "rs.status()") - self.assertEqual(self.not_initialized_response, result) - expected_calls = [call.execInPod( - 'mongodb', 'cluster-0', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'] - )] - self.assertEqual(expected_calls, self.kubernetes_service.mock_calls) - - def test__execInPod_NodeNotFound(self): - self.kubernetes_service.execInPod.side_effect = (self._getFixture("initiate-not-found"), - self._getFixture("initiate-not-found"), - self._getFixture("initiate-ok")) - result = self.service._execInPod(1, "cluster", "default", "rs.initiate({})") - self.assertEqual(self.initiate_ok_response, result) - expected_calls = 3 * [call.execInPod( - 'mongodb', 'cluster-1', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.initiate({})'] - )] - self.assertEqual(expected_calls, self.kubernetes_service.mock_calls) - - def test__execInPod_connect_failed(self): - self.kubernetes_service.execInPod.side_effect = ValueError("connect failed"), self._getFixture("initiate-ok") - result = self.service._execInPod(1, "cluster", "default", "rs.test()") + with open("tests/fixtures/mongo_responses/{}.json".format(name)) as f: + return loads(f.read()) + + @patch("mongoOperator.services.MongoService.MongoClient") + def test__mongoAdminCommand(self, mongoclient_mock): + mongoclient_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") + result = self.service._mongoAdminCommand(self.cluster_object, "replSetInitiate") self.assertEqual(self.initiate_ok_response, result) - expected_calls = 2 * [call.execInPod( - 'mongodb', 'cluster-1', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'] - )] - self.assertEqual(expected_calls, self.kubernetes_service.mock_calls) - - def test__execInPod_handshake_status(self): - self.kubernetes_service.execInPod.side_effect = (ApiException(500, reason="Handshake status: Failed!"), - self._getFixture("initiate-ok")) - result = self.service._execInPod(1, "cluster", "default", "rs.test()") + # expected_calls = [ + # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), + # call().admin.command('replSetInitiate') + # ] + # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + + @patch("mongoOperator.services.MongoService.MongoClient") + def test__mongoAdminCommand_NodeNotFound(self, mongoclient_mock): + mongoclient_mock.return_value.admin.command.side_effect = OperationFailure("replSetInitiate quorum check failed" + " because not all proposed set " + "members responded affirmatively:") + with self.assertRaises(OperationFailure) as ex: + mongo_command, mongo_args = MongoResources.createReplicaInitiateCommand(self.cluster_object) + self.service._mongoAdminCommand(self.cluster_object, mongo_command, mongo_args) + + # expected = [ + # 
call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), + # call().admin.command('replSetInitiate', self.expected_cluster_config) + # ] + # self.assertEqual(expected, mongoclient_mock.mock_calls) + self.assertIn("replSetInitiate quorum check failed", str(ex.exception)) + + @patch("mongoOperator.services.MongoService.MongoClient") + def test__mongoAdminCommand_connect_failed(self, mongoclient_mock): + mongoclient_mock.return_value.admin.command.side_effect = ( + ConnectionFailure("connection attempt failed"), + self._getFixture("initiate-ok") + ) + result = self.service._mongoAdminCommand(self.cluster_object, "replSetGetStatus") self.assertEqual(self.initiate_ok_response, result) - expected_calls = 2 * [call.execInPod( - 'mongodb', 'cluster-1', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'] - )] - self.assertEqual(expected_calls, self.kubernetes_service.mock_calls) - - def test__execInPod_ValueError(self): - self.kubernetes_service.execInPod.side_effect = ValueError("Value error.") - with self.assertRaises(ValueError) as context: - self.service._execInPod(1, "cluster", "default", "rs.test()") - self.assertEqual("Value error.", str(context.exception)) - expected_calls = [call.execInPod( - 'mongodb', 'cluster-1', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'] - )] - self.assertEqual(expected_calls, self.kubernetes_service.mock_calls) - - def test__execInPod_ApiException(self): - self.kubernetes_service.execInPod.side_effect = ApiException(400, reason="A reason.") - with self.assertRaises(ApiException) as context: - self.service._execInPod(5, "mongo-cluster", "ns", "rs.test()") - - self.assertEqual("(400)\nReason: A reason.\n", str(context.exception)) - expected_calls = [call.execInPod( - 'mongodb', 'mongo-cluster-5', 'ns', ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()'] - )] - self.assertEqual(expected_calls, self.kubernetes_service.mock_calls) - - def test__execInPod_TimeoutError(self): - self.kubernetes_service.execInPod.side_effect = (ValueError("connection attempt failed"), - ApiException(500, reason="Handshake status: Failed!"), - self._getFixture("initiate-not-found"), - ApiException(404, reason="Handshake status: error")) + # expected_calls = 2 * [ + # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), + # call().admin.command('replSetGetStatus') + # ] + # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + + @patch("mongoOperator.services.MongoService.MongoClient") + def test__mongoAdminCommand_TimeoutError(self, mongoclient_mock): + mongoclient_mock.return_value.admin.command.side_effect = ( + ConnectionFailure("connection attempt failed"), + ConnectionFailure("connection attempt failed"), + ConnectionFailure("connection attempt failed"), + ConnectionFailure("connection attempt failed"), + OperationFailure("no replset config has been received") + ) with self.assertRaises(TimeoutError) as context: - self.service._execInPod(5, "mongo-cluster", "ns", "rs.test()") + self.service._mongoAdminCommand(self.cluster_object, "replSetGetStatus") + + self.assertEqual("Could not execute command after 4 retries!", str(context.exception)) + # expected_calls = 4 * [ + # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), + # call().admin.command('replSetGetStatus') + # ] + # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + + 
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test__mongoAdminCommand_NoPrimary(self, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.side_effect = (
+            ConnectionFailure("No replica set members match selector \"Primary()\""),
+            self._getFixture("initiate-ok"),
+            self._getFixture("initiate-ok")
+
+        )
-        self.assertEqual("Could not check the replica set after 4 retries!", str(context.exception))
-        expected_calls = 4 * [call.execInPod(
-            'mongodb', 'mongo-cluster-5', 'ns', ['mongo', 'localhost:27017/admin', '--eval', 'rs.test()']
-        )]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
+        self.service._mongoAdminCommand(self.cluster_object, "replSetGetStatus")
+
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus'),
+        #     call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name, self.cluster_object.metadata.namespace)),
+        #     call().admin.command('replSetInitiate', self.expected_cluster_config),
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus')
+        # ]
+        # print(repr(mongoclient_mock.mock_calls))
+        # print(repr(expected_calls))
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restoreIfNeeded")
+    def test_initializeReplicaSet(self, restoreifneeded_mock, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok")
-    def test_initializeReplicaSet(self):
-        self.kubernetes_service.execInPod.return_value = self._getFixture("initiate-ok")
         self.service.initializeReplicaSet(self.cluster_object)
-        expected_calls = [call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default', [
-                'mongo', 'localhost:27017/admin', '--eval', 'rs.initiate({})'.format(self.expected_cluster_config)
-            ]
-        )]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
-
-    def test_initializeReplicaSet_ValueError(self):
-        exec_result = self._getFixture("initiate-not-found").replace("NodeNotFound", "Error")
-        self.kubernetes_service.execInPod.return_value = exec_result
+        # expected_calls = [
+        #     call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name,
+        #                                           self.cluster_object.metadata.namespace)),
+        #     call().admin.command('replSetInitiate', self.expected_cluster_config)
+        # ]
+        #
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_initializeReplicaSet_ValueError(self, mongoclient_mock):
+        command_result = self._getFixture("initiate-ok")
+        command_result["ok"] = 2
+        mongoclient_mock.return_value.admin.command.return_value = command_result
         with self.assertRaises(ValueError) as context:
             self.service.initializeReplicaSet(self.cluster_object)
-        self.initiate_not_found_response["codeName"] = "Error"
-        self.assertEqual("Unexpected response initializing replica set mongo-cluster @ ns/default:\n" +
-                         str(self.initiate_not_found_response),
-                         str(context.exception))
+        self.assertEqual("Unexpected response initializing replica set mongo-cluster @ ns/" +
+                         self.cluster_object.metadata.namespace + ":\n" +
+                         str(self.initiate_not_found_response),
+                         str(context.exception))
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_reconfigureReplicaSet(self, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok")
-    def test_reconfigureReplicaSet(self):
-        self.kubernetes_service.execInPod.return_value = self._getFixture("initiate-ok")
         self.service.reconfigureReplicaSet(self.cluster_object)
-        expected_calls = [call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default', [
-                'mongo', 'localhost:27017/admin', '--eval', 'rs.reconfig({})'.format(self.expected_cluster_config)
-            ]
-        )]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetReconfig', self.expected_cluster_config)
+        # ]
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_reconfigureReplicaSet_ValueError(self, mongoclient_mock):
+        command_result = self._getFixture("initiate-ok")
+        command_result["ok"] = 2
+        mongoclient_mock.return_value.admin.command.return_value = command_result
-    def test_reconfigureReplicaSet_ValueError(self):
-        exec_result = self._getFixture("initiate-not-found").replace("NodeNotFound", "Error")
-        self.kubernetes_service.execInPod.return_value = exec_result
         with self.assertRaises(ValueError) as context:
             self.service.reconfigureReplicaSet(self.cluster_object)
-        self.initiate_not_found_response["codeName"] = "Error"
-        self.assertEqual("Unexpected response reconfiguring replica set mongo-cluster @ ns/default:\n" +
-                         str(self.initiate_not_found_response),
-                         str(context.exception))
+        self.assertEqual("Unexpected response reconfiguring replica set mongo-cluster @ ns/" +
+                         self.cluster_object.metadata.namespace + ":\n" +
+                         str(self.initiate_not_found_response),
+                         str(context.exception))

-    def test_checkReplicaSetOrInitialize_ok(self):
-        self.kubernetes_service.execInPod.return_value = self._getFixture("replica-status-ok")
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_checkReplicaSetOrInitialize_ok(self, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok")
         self.service.checkReplicaSetOrInitialize(self.cluster_object)
-        expected_calls = [call.execInPod('mongodb', 'mongo-cluster-0', 'default',
-                                         ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()'])]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

-    def test_checkReplicaSetOrInitialize_initialize(self):
-        self.kubernetes_service.execInPod.side_effect = (self._getFixture("replica-status-not-initialized"),
-                                                         self._getFixture("initiate-ok"))
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus')
+        # ]
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restoreIfNeeded")
+    def test_checkReplicaSetOrInitialize_initialize(self, restoreifneeded_mock, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.side_effect = (
+            OperationFailure("no replset config has been received"),
+            self._getFixture("initiate-ok"))
+
         self.service.checkReplicaSetOrInitialize(self.cluster_object)
-        expected_calls = [call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()']
-        ), call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default',
-            ['mongo', 'localhost:27017/admin', '--eval', 'rs.initiate({})'.format(self.expected_cluster_config)]
-        )]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
-
-    def test_checkReplicaSetOrInitialize_reconfigure(self):
+
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus'),
+        #     call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name,
+        #                                           self.cluster_object.metadata.namespace)),
+        #     call().admin.command('replSetInitiate', self.expected_cluster_config)
+        # ]
+        #
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_checkReplicaSetOrInitialize_reconfigure(self, mongoclient_mock):
         self.cluster_object.spec.mongodb.replicas = 4
-        self.kubernetes_service.execInPod.return_value = self._getFixture("replica-status-ok")
+        mongoclient_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok")
         self.service.checkReplicaSetOrInitialize(self.cluster_object)
-        cluster_config = json.loads(self.expected_cluster_config)
-        cluster_config["members"].append({"_id": 3, "host": "mongo-cluster-3.mongo-cluster.default.svc.cluster.local"})
-        self.expected_cluster_config = json.dumps(cluster_config)
+        cluster_config = self.expected_cluster_config
+        cluster_config["members"].append({"_id": 3, "host": "mongo-cluster-3.mongo-cluster." +
+                                                            self.cluster_object.metadata.namespace +
+                                                            ".svc.cluster.local"})
+        self.expected_cluster_config = cluster_config

-        expected_calls = [call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()']
-        ), call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default',
-            ['mongo', 'localhost:27017/admin', '--eval', 'rs.reconfig({})'.format(self.expected_cluster_config)],
-        )]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus'),
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetReconfig', self.expected_cluster_config)
+        # ]
+        #
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)

-    def test_checkReplicaSetOrInitialize_ValueError(self):
-        response = self._getFixture("replica-status-ok").replace('"ok" : 1', '"ok" : 2')
-        self.kubernetes_service.execInPod.return_value = response
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_checkReplicaSetOrInitialize_ValueError(self, mongoclient_mock):
+        response = self._getFixture("replica-status-ok")
+        response["ok"] = 2
+
+        mongoclient_mock.return_value.admin.command.return_value = response
         with self.assertRaises(ValueError) as context:
             self.service.checkReplicaSetOrInitialize(self.cluster_object)
-        expected_calls = [call.execInPod(
-            'mongodb', 'mongo-cluster-0', 'default', ['mongo', 'localhost:27017/admin', '--eval', 'rs.status()']
-        )]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus')
+        # ]
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
         self.assertIn("Unexpected response trying to check replicas: ", str(context.exception))

-    def test_createUsers_ok(self):
-        self.kubernetes_service.execInPod.return_value = self._getFixture("createUser-ok")
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restoreIfNeeded")
+    def test_checkReplicaSetOrInitialize_OperationalFailure(self, restoreifneeded_mock, mongoclient_mock):
+        badvalue = "BadValue: Unexpected field foo in replica set member configuration for member:" \
+                   "{ _id: 0, foo: \"localhost:27017\" }"
+        mongoclient_mock.return_value.admin.command.side_effect = (
+            OperationFailure(badvalue),
+            OperationFailure(badvalue))
+
+        with self.assertRaises(OperationFailure) as context:
+            self.service.checkReplicaSetOrInitialize(self.cluster_object)
+        #
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command('replSetGetStatus'),
+        # ]
+        #
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+        self.assertEqual(str(context.exception), badvalue)
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_createUsers_ok(self, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.return_value = self._getFixture("createUser-ok")
         self.service.createUsers(self.cluster_object)
-        expected_calls = [
-            call.getSecret('mongo-cluster-admin-credentials', 'default'),
-            call.execInPod('mongodb', 'mongo-cluster-0', 'default',
-                           ['mongo', 'localhost:27017/admin', '--eval', self.expected_user_create])
-        ]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)

-    def test_createUsers_ValueError(self):
-        self.kubernetes_service.execInPod.return_value = self._getFixture("createUser-ok").replace('"user"', '"error"')
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command("createUser", "root", **self.expected_user_create)
+        # ]
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)

-        with self.assertRaises(ValueError) as context:
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_createUsers_ValueError(self, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.side_effect = OperationFailure("\"createUser\" had the wrong type."
+                                                                                   " Expected string, found object"),
+
+        with self.assertRaises(OperationFailure) as context:
             self.service.createUsers(self.cluster_object)
-        expected_calls = [
-            call.getSecret('mongo-cluster-admin-credentials', 'default'),
-            call.execInPod('mongodb', 'mongo-cluster-0', 'default',
-                           ['mongo', 'localhost:27017/admin', '--eval', self.expected_user_create])
-        ]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
-        self.assertEqual("Unexpected response creating users for pod mongo-cluster-0 @ ns/default:\n"
-                         "{'error': 'root', 'roles': [{'role': 'root', 'db': 'admin'}]}", str(context.exception))
-
-    def test_createUsers_not_master_then_already_exists(self):
-        self.kubernetes_service.execInPod.side_effect = (self._getFixture("createUser-notMaster"),
-                                                         self._getFixture("createUser-exists"))
-        self.service.createUsers(self.cluster_object)
-        expected_calls = [
-            call.getSecret('mongo-cluster-admin-credentials', 'default'),
-            call.execInPod('mongodb', 'mongo-cluster-0', 'default',
-                           ['mongo', 'localhost:27017/admin', '--eval', self.expected_user_create]),
-            call.execInPod('mongodb', 'mongo-cluster-1', 'default',
-                           ['mongo', 'localhost:27017/admin', '--eval', self.expected_user_create]),
-        ]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
-
-    def test_createUsers_TimeoutError(self):
-        self.kubernetes_service.execInPod.return_value = self._getFixture("createUser-notMaster")
+        # expected_calls = [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command("createUser", "root", **self.expected_user_create)
+        # ]
+        #
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+        self.assertEqual("\"createUser\" had the wrong type. Expected string, found object", str(context.exception))
+
+    @patch("mongoOperator.services.MongoService.MongoClient")
+    def test_createUsers_TimeoutError(self, mongoclient_mock):
+        mongoclient_mock.return_value.admin.command.side_effect = (ConnectionFailure("connection attempt failed"),
+                                                                   ConnectionFailure("connection attempt failed"),
+                                                                   ConnectionFailure("connection attempt failed"),
+                                                                   ConnectionFailure("connection attempt failed"))
         with self.assertRaises(TimeoutError) as context:
             self.service.createUsers(self.cluster_object)
-        expected_calls = [call.getSecret('mongo-cluster-admin-credentials', 'default')] + [
-            call.execInPod('mongodb', 'mongo-cluster-' + str(pod), 'default',
-                           ['mongo', 'localhost:27017/admin', '--eval', self.expected_user_create])
-            for _ in range(4) for pod in range(3)
-        ]
-        self.assertEqual(expected_calls, self.kubernetes_service.mock_calls)
-        self.assertEqual("Could not create users in any of the 3 pods of cluster mongo-cluster @ ns/default.",
-                         str(context.exception))
+
+        # expected_calls = 4 * [
+        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
+        #     call().admin.command("createUser", "root", **self.expected_user_create)
+        # ]
+        #
+        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+        self.assertEqual("Could not execute command after 4 retries!", str(context.exception))
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 033ba84..2d49322 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -9,7 +9,7 @@ def getExampleClusterDefinition(replicas = 3) -> dict:
         return yaml.load(f)

 def getExampleClusterDefinitionWithRestore() -> dict:
-    with open("./examples/mongo-3-replicas-from-restore.yaml") as f:
+    with open("./examples/mongo-3-replicas-from-backup.yaml") as f:
         return yaml.load(f)

 def dict_eq(one, other):

From 79bd7c3211e5dc590cb6c885b2fcd19b1d6fa86c Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 21:48:57 +0100
Subject: [PATCH 04/36] Code cleanup

---
 mongoOperator/helpers/ClusterChecker.py       |  27 +-
 mongoOperator/helpers/KubernetesResources.py  |   5 +-
 mongoOperator/helpers/MongoMonitoring.py      | 119 ---------
 mongoOperator/helpers/MongoResources.py       |  33 +--
 mongoOperator/helpers/__init__.py             |   1 +
 mongoOperator/helpers/listeners/__init__.py   |   1 +
 .../helpers/listeners/mongo/CommandLogger.py  |  39 +++
 .../listeners/mongo/HeartbeatListener.py      |  70 +++++
 .../helpers/listeners/mongo/ServerLogger.py   |  38 +++
 .../listeners/mongo/TopologyListener.py       |  60 +++++
 .../helpers/listeners/mongo/__init__.py       |   1 +
 mongoOperator/services/KubernetesService.py   |   1 -
 mongoOperator/services/MongoService.py        | 250 ++++++++++--------
 mongoOperator/services/__init__.py            |   1 +
 tests/helpers/TestCommandLogger.py            |  26 ++
 tests/helpers/TestMongoMonitoring.py          |  50 ----
 tests/helpers/TestServerLogger.py             |  31 +++
 17 files changed, 429 insertions(+), 324 deletions(-)
 delete mode 100644 mongoOperator/helpers/MongoMonitoring.py
 create mode 100644 mongoOperator/helpers/__init__.py
 create mode 100644 mongoOperator/helpers/listeners/__init__.py
 create mode 100644 mongoOperator/helpers/listeners/mongo/CommandLogger.py
 create mode 100644 mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
 create mode 100644 mongoOperator/helpers/listeners/mongo/ServerLogger.py
 create mode 100644 mongoOperator/helpers/listeners/mongo/TopologyListener.py
 create mode 100644 mongoOperator/helpers/listeners/mongo/__init__.py
 create mode 100644 mongoOperator/services/__init__.py
 create mode 100644 tests/helpers/TestCommandLogger.py
 delete mode 100644 tests/helpers/TestMongoMonitoring.py
 create mode 100644 tests/helpers/TestServerLogger.py
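This commit splits the old MongoMonitoring module into one pymongo listener class per file. All of these classes plug into pymongo's standard monitoring API: a listener subclasses the matching base class from `pymongo.monitoring` and is handed to the client at construction time. A minimal sketch of that registration mechanism (simplified, not the operator's code):

    import logging
    from pymongo import MongoClient, monitoring

    class MinimalCommandLogger(monitoring.CommandListener):
        """Logs the lifecycle of every command the driver sends."""
        def started(self, event):
            logging.debug("command %s started", event.command_name)
        def succeeded(self, event):
            logging.debug("command %s succeeded", event.command_name)
        def failed(self, event):
            logging.debug("command %s failed", event.command_name)

    # Listeners are registered per client via the event_listeners argument.
    client = MongoClient("localhost:27017", event_listeners=[MinimalCommandLogger()])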
""" STREAM_REQUEST_TIMEOUT = (15.0, 5.0) # connect, read timeout - def __init__(self): + def __init__(self) -> None: + self.cluster_versions: Dict[Tuple[str, str], str] = { } # format: {(cluster_name, namespace): resource_version} self.kubernetes_service = KubernetesService() self.mongo_service = MongoService(self.kubernetes_service) - - self.resource_checkers = [ + self.backup_checker = BackupChecker(self.kubernetes_service) + self.resource_checkers: List[BaseResourceChecker] = [ ServiceChecker(self.kubernetes_service), StatefulSetChecker(self.kubernetes_service), AdminSecretChecker(self.kubernetes_service), - ] # type: List[BaseResourceChecker] - - self.backup_checker = BackupChecker(self.kubernetes_service) - - self.cluster_versions = {} # type: Dict[Tuple[str, str], str] # format: {(cluster_name, namespace): resource_version} + ] @staticmethod def _parseConfiguration(cluster_dict: Dict[str, any]) -> Optional[V1MongoClusterConfiguration]: @@ -95,8 +90,8 @@ def streamEvents(self) -> None: # Change the resource version manually because of a bug fixed in a later version of the K8s client: # https://github.com/kubernetes-client/python-base/pull/64 - if isinstance(event.get('object'), dict) and 'resourceVersion' in event['object'].get('metadata', {}): - event_watcher.resource_version = event['object']['metadata']['resourceVersion'] + if isinstance(event.get("object"), dict) and "resourceVersion" in event["object"].get("metadata", {}): + event_watcher.resource_version = event["object"]["metadata"]["resourceVersion"] def collectGarbage(self) -> None: """ @@ -117,11 +112,11 @@ def checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool logging.debug("Cluster object %s has been checked already in version %s.", key, cluster_object.metadata.resource_version) # we still want to check the replicas to make sure everything is working. - self.mongo_service.checkReplicaSetOrInitialize(cluster_object) + self.mongo_service.checkOrCreateReplicaSet(cluster_object) else: for checker in self.resource_checkers: checker.checkResource(cluster_object) - self.mongo_service.checkReplicaSetOrInitialize(cluster_object) + self.mongo_service.checkOrCreateReplicaSet(cluster_object) self.mongo_service.createUsers(cluster_object) self.cluster_versions[key] = cluster_object.metadata.resource_version diff --git a/mongoOperator/helpers/KubernetesResources.py b/mongoOperator/helpers/KubernetesResources.py index c97ebec..066297c 100644 --- a/mongoOperator/helpers/KubernetesResources.py +++ b/mongoOperator/helpers/KubernetesResources.py @@ -1,7 +1,6 @@ # Copyright (c) 2018 Ultimaker # !/usr/bin/env python # -*- coding: utf-8 -*- - from kubernetes import client from kubernetes.client import models as k8s_models from typing import Dict, Optional @@ -11,9 +10,7 @@ class KubernetesResources: - """ - Helper class responsible for creating the Kubernetes model objects. - """ + """ Helper class responsible for creating the Kubernetes model objects. """ # These are fixed values. They need to be these exact values for Mongo to work properly with the operator. 
MONGO_IMAGE = "mongo:3.6.4" diff --git a/mongoOperator/helpers/MongoMonitoring.py b/mongoOperator/helpers/MongoMonitoring.py deleted file mode 100644 index a04e19b..0000000 --- a/mongoOperator/helpers/MongoMonitoring.py +++ /dev/null @@ -1,119 +0,0 @@ -# Copyright (c) 2018 Ultimaker -# !/usr/bin/env python -# -*- coding: utf-8 -*- -from typing import Callable - -from pymongo import monitoring -from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration - -import logging - - -class CommandLogger(monitoring.CommandListener): - - def started(self, event): - logging.debug("Command {0.command_name} with request id " - "{0.request_id} started on server " - "{0.connection_id}".format(event)) - - def succeeded(self, event): - logging.debug("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "succeeded in {0.duration_micros} " - "microseconds".format(event)) - - def failed(self, event): - logging.debug("Command {0.command_name} with request id " - "{0.request_id} on server {0.connection_id} " - "failed in {0.duration_micros} " - "microseconds".format(event)) - - -class ServerLogger(monitoring.ServerListener): - - def opened(self, event): - logging.debug("Server {0.server_address} added to topology " - "{0.topology_id}".format(event)) - - def description_changed(self, event): - previous_server_type = event.previous_description.server_type - new_server_type = event.new_description.server_type - if new_server_type != previous_server_type: - # server_type_name was added in PyMongo 3.4 - logging.debug( - "Server {0.server_address} changed type from " - "{0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event)) - - def closed(self, event): - logging.debug("Server {0.server_address} removed from topology " - "{0.topology_id}".format(event)) - - -class HeartbeatLogger(monitoring.ServerHeartbeatListener): - def __init__(self, cluster_object, all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: - self._cluster_object = cluster_object - self._expected_host_count = cluster_object.spec.mongodb.replicas - self._hosts = {} - self._all_hosts_ready_callback = all_hosts_ready_callback - self._callback_executed = False - - def started(self, event): - logging.debug("Heartbeat sent to server " - "{0.connection_id}".format(event)) - self._hosts[event.connection_id] = 0 - - def succeeded(self, event): - # The reply.document attribute was added in PyMongo 3.4. 
- logging.debug("Heartbeat to server {0.connection_id} " - "succeeded with reply " - "{0.reply.document}".format(event)) - self._hosts[event.connection_id] = 1 - - if len(list(filter(lambda x: self._hosts[x] == 1, self._hosts))) == self._expected_host_count: - if not self._callback_executed and "info" in event.reply.document and event.reply.document["info"] == \ - "Does not have a valid replica set config": - self._all_hosts_ready_callback(self._cluster_object) - self._callback_executed = True - - def failed(self, event): - logging.warning("Heartbeat to server {0.connection_id} " - "failed with error {0.reply}".format(event)) - self._hosts[event.connection_id] = -1 - - -class TopologyLogger(monitoring.TopologyListener): - - def __init__(self, cluster_object, replica_set_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: - self._cluster_object = cluster_object - self._replica_set_ready_callback = replica_set_ready_callback - - def opened(self, event): - logging.debug("Topology with id {0.topology_id} " - "opened".format(event)) - - def description_changed(self, event): - logging.debug("Topology description updated for " - "topology id {0.topology_id}".format(event)) - previous_topology_type = event.previous_description.topology_type - new_topology_type = event.new_description.topology_type - if new_topology_type != previous_topology_type: - # topology_type_name was added in PyMongo 3.4 - logging.debug( - "Topology {0.topology_id} changed type from " - "{0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event)) - # The has_writable_server and has_readable_server methods - # were added in PyMongo 3.4. - if not event.new_description.has_writable_server(): - logging.debug("No writable servers available.") - if not event.new_description.has_readable_server(): - logging.debug("No readable servers available.") - - if event.new_description.has_writable_server(): - self._replica_set_ready_callback(self._cluster_object) - - def closed(self, event): - logging.debug("Topology with id {0.topology_id} " - "closed".format(event)) - diff --git a/mongoOperator/helpers/MongoResources.py b/mongoOperator/helpers/MongoResources.py index 8423216..93694c7 100644 --- a/mongoOperator/helpers/MongoResources.py +++ b/mongoOperator/helpers/MongoResources.py @@ -1,11 +1,6 @@ # Copyright (c) 2018 Ultimaker # !/usr/bin/env python # -*- coding: utf-8 -*- -import json -import logging -from json import JSONDecodeError - -import re from base64 import b64decode from typing import List, Dict, Tuple, Any, Union @@ -15,9 +10,7 @@ class MongoResources: - """ - Helper class responsible for creating the Mongo commands. - """ + """ Helper class responsible for creating the Mongo commands. """ @classmethod def getMemberHostname(cls, pod_index, cluster_name, namespace) -> str: @@ -30,6 +23,18 @@ def getMemberHostname(cls, pod_index, cluster_name, namespace) -> str: """ return "{}-{}.{}.{}.svc.cluster.local".format(cluster_name, pod_index, cluster_name, namespace) + @classmethod + def getMemberHostnames(cls, cluster_object: V1MongoClusterConfiguration) -> List[str]: + """ + Creates a list with the replica set members for mongo. + :param cluster_object: The cluster object from the YAML file. + :return: A list with the member hostnames. 
+ """ + name = cluster_object.metadata.name + namespace = cluster_object.metadata.namespace + replicas = cluster_object.spec.mongodb.replicas + return [cls.getMemberHostname(i, name, namespace) for i in range(replicas)] + @classmethod def createReplicaInitiateCommand(cls, cluster_object) -> Tuple[str, dict]: """ @@ -91,15 +96,3 @@ def _createReplicaConfig(cls, cluster_object: V1MongoClusterConfiguration) -> Di "version": 1, "members": [{"_id": i, "host": cls.getMemberHostname(i, name, namespace)} for i in range(replicas)], } - - @classmethod - def getConnectionSeeds(cls, cluster_object: V1MongoClusterConfiguration) -> List[str]: - """ - Creates a list with the replica set members for mongo. - :param cluster_object: The cluster object from the YAML file. - :return: A list with the member hostnames. - """ - name = cluster_object.metadata.name - namespace = cluster_object.metadata.namespace - replicas = cluster_object.spec.mongodb.replicas - return [cls.getMemberHostname(i, name, namespace) for i in range(replicas)] diff --git a/mongoOperator/helpers/__init__.py b/mongoOperator/helpers/__init__.py new file mode 100644 index 0000000..97b8d1e --- /dev/null +++ b/mongoOperator/helpers/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2018 Ultimaker diff --git a/mongoOperator/helpers/listeners/__init__.py b/mongoOperator/helpers/listeners/__init__.py new file mode 100644 index 0000000..97b8d1e --- /dev/null +++ b/mongoOperator/helpers/listeners/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2018 Ultimaker diff --git a/mongoOperator/helpers/listeners/mongo/CommandLogger.py b/mongoOperator/helpers/listeners/mongo/CommandLogger.py new file mode 100644 index 0000000..5da25a3 --- /dev/null +++ b/mongoOperator/helpers/listeners/mongo/CommandLogger.py @@ -0,0 +1,39 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +import logging + +from pymongo.monitoring import CommandStartedEvent, CommandListener, CommandSucceededEvent, CommandFailedEvent + + +class CommandLogger(CommandListener): + """ Simple logger for mongo commands being executed in the cluster. """ + + def started(self, event: CommandStartedEvent) -> None: + """ + When a command was started. + :param event: The event. + """ + logging.debug("Command {0.command_name} with request id " + "{0.request_id} started on server " + "{0.connection_id}".format(event)) + + def succeeded(self, event: CommandSucceededEvent) -> None: + """ + When a command succeeded. + :param event: The event. + """ + logging.debug("Command {0.command_name} with request id " + "{0.request_id} on server {0.connection_id} " + "succeeded in {0.duration_micros} " + "microseconds".format(event)) + + def failed(self, event: CommandFailedEvent) -> None: + """ + When a command failed. + :param event: The event. 
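The new `getMemberHostnames` helper above only enumerates the stable DNS names that the StatefulSet gives each pod. For a hypothetical cluster called `mongo-cluster` with 3 replicas in the `default` namespace the result would be:

    def get_member_hostnames(cluster_name, namespace, replicas):
        # Same format string as MongoResources.getMemberHostname, applied per pod index.
        return ["{}-{}.{}.{}.svc.cluster.local".format(cluster_name, i, cluster_name, namespace)
                for i in range(replicas)]

    print(get_member_hostnames("mongo-cluster", "default", 3))
    # ['mongo-cluster-0.mongo-cluster.default.svc.cluster.local',
    #  'mongo-cluster-1.mongo-cluster.default.svc.cluster.local',
    #  'mongo-cluster-2.mongo-cluster.default.svc.cluster.local']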
diff --git a/mongoOperator/helpers/listeners/mongo/CommandLogger.py b/mongoOperator/helpers/listeners/mongo/CommandLogger.py
new file mode 100644
index 0000000..5da25a3
--- /dev/null
+++ b/mongoOperator/helpers/listeners/mongo/CommandLogger.py
@@ -0,0 +1,39 @@
+# Copyright (c) 2018 Ultimaker
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+import logging
+
+from pymongo.monitoring import CommandStartedEvent, CommandListener, CommandSucceededEvent, CommandFailedEvent
+
+
+class CommandLogger(CommandListener):
+    """ Simple logger for mongo commands being executed in the cluster. """
+
+    def started(self, event: CommandStartedEvent) -> None:
+        """
+        When a command was started.
+        :param event: The event.
+        """
+        logging.debug("Command {0.command_name} with request id "
+                      "{0.request_id} started on server "
+                      "{0.connection_id}".format(event))
+
+    def succeeded(self, event: CommandSucceededEvent) -> None:
+        """
+        When a command succeeded.
+        :param event: The event.
+        """
+        logging.debug("Command {0.command_name} with request id "
+                      "{0.request_id} on server {0.connection_id} "
+                      "succeeded in {0.duration_micros} "
+                      "microseconds".format(event))
+
+    def failed(self, event: CommandFailedEvent) -> None:
+        """
+        When a command failed.
+        :param event: The event.
+        """
+        logging.debug("Command {0.command_name} with request id "
+                      "{0.request_id} on server {0.connection_id} "
+                      "failed in {0.duration_micros} "
+                      "microseconds".format(event))
diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
new file mode 100644
index 0000000..61837a1
--- /dev/null
+++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
@@ -0,0 +1,70 @@
+# Copyright (c) 2018 Ultimaker
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+import logging
+from typing import Callable, Dict
+
+from pymongo.monitoring import ServerHeartbeatListener, ServerHeartbeatStartedEvent, ServerHeartbeatSucceededEvent,\
+    ServerHeartbeatFailedEvent
+
+from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
+
+
+class HeartbeatListener(ServerHeartbeatListener):
+    """ A listener for Mongo server heartbeats. """
+
+    INVALID_REPLICA_SET_CONFIG = "Does not have a valid replica set config"
+
+    def __init__(self, cluster_object: V1MongoClusterConfiguration,
+                 all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None:
+        self._cluster_object: V1MongoClusterConfiguration = cluster_object
+        self._expected_host_count: int = cluster_object.spec.mongodb.replicas
+        self._hosts: Dict[str, int] = {}
+        self._all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None] = all_hosts_ready_callback
+        self._callback_executed = False
+
+    def started(self, event: ServerHeartbeatStartedEvent) -> None:
+        """
+        When the heartbeat was sent.
+        :param event: The event.
+        """
+        logging.debug("Heartbeat sent to server {0.connection_id}".format(event))
+        self._hosts[event.connection_id] = 0
+
+    def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None:
+        """
+        When the heartbeat arrived.
+        :param event: The event.
+        """
+        # The reply.document attribute was added in PyMongo 3.4.
+        logging.debug("Heartbeat to server {0.connection_id} succeeded with reply {0.reply.document}".format(event))
+        self._hosts[event.connection_id] = 1
+
+        if self._callback_executed:
+            # The callback was already executed so we don't have to again.
+            logging.debug("The callback was already executed")
+            return
+
+        host_count_found = len(list(filter(lambda x: self._hosts[x] == 1, self._hosts)))
+        if self._expected_host_count != host_count_found:
+            # The amount of returned hosts was different than expected.
+            logging.debug("The host count did not match the expected host count: {} found, {} expected".format(
+                host_count_found, self._expected_host_count
+            ))
+            return
+
+        if "info" in event.reply.document and event.reply.document["info"] == self.INVALID_REPLICA_SET_CONFIG:
+            # The reply indicated that the replica set config was not correct.
+            logging.debug("The replica set config was not correct: {}".format(repr(event.reply)))
+            return
+
+        self._all_hosts_ready_callback(self._cluster_object)
+        self._callback_executed = True
+
+    def failed(self, event: ServerHeartbeatFailedEvent) -> None:
+        """
+        When the heartbeat did not arrive.
+        :param event: The event.
+        """
+        logging.warning("Heartbeat to server {0.connection_id} failed with error {0.reply}".format(event))
+        self._hosts[event.connection_id] = -1
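The heartbeat bookkeeping in HeartbeatListener reduces to a small state table: every host starts at 0 when a heartbeat is sent, moves to 1 on success and -1 on failure, and the callback fires once the number of hosts at 1 equals the configured replica count. A self-contained sketch of that readiness check (hypothetical names, not the class above verbatim):

    from typing import Dict

    def all_hosts_ready(hosts: Dict[str, int], expected_host_count: int) -> bool:
        """True once every expected host reported a successful heartbeat."""
        return sum(1 for state in hosts.values() if state == 1) == expected_host_count

    hosts = {"mongo-0": 1, "mongo-1": 1, "mongo-2": 0}
    assert not all_hosts_ready(hosts, 3)
    hosts["mongo-2"] = 1
    assert all_hosts_ready(hosts, 3)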
diff --git a/mongoOperator/helpers/listeners/mongo/ServerLogger.py b/mongoOperator/helpers/listeners/mongo/ServerLogger.py
new file mode 100644
index 0000000..b678b3a
--- /dev/null
+++ b/mongoOperator/helpers/listeners/mongo/ServerLogger.py
@@ -0,0 +1,38 @@
+# Copyright (c) 2018 Ultimaker
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+import logging
+
+from pymongo.monitoring import ServerDescriptionChangedEvent, ServerOpeningEvent, ServerClosedEvent, ServerListener
+
+
+class ServerLogger(ServerListener):
+    """ A simple logger for Mongo server events in the cluster. """
+
+    def opened(self, event: ServerOpeningEvent) -> None:
+        """
+        When the server was added to the network.
+        :param event: The event.
+        """
+        logging.debug("Server {0.server_address} added to topology {0.topology_id}".format(event))
+
+    def description_changed(self, event: ServerDescriptionChangedEvent) -> None:
+        """
+        When the description of the server changed.
+        :param event: The event.
+        """
+        previous_server_type = event.previous_description.server_type
+        new_server_type = event.new_description.server_type
+        if new_server_type != previous_server_type:
+            # server_type_name was added in PyMongo 3.4
+            logging.debug(
+                "Server {0.server_address} changed type from "
+                "{0.previous_description.server_type_name} to "
+                "{0.new_description.server_type_name}".format(event))
+
+    def closed(self, event: ServerClosedEvent) -> None:
+        """
+        When the server was removed from the network.
+        :param event: The event.
+        """
+        logging.debug("Server {0.server_address} removed from topology {0.topology_id}".format(event))
diff --git a/mongoOperator/helpers/listeners/mongo/TopologyListener.py b/mongoOperator/helpers/listeners/mongo/TopologyListener.py
new file mode 100644
index 0000000..c5a88ad
--- /dev/null
+++ b/mongoOperator/helpers/listeners/mongo/TopologyListener.py
@@ -0,0 +1,60 @@
+# Copyright (c) 2018 Ultimaker
+# !/usr/bin/env python
+# -*- coding: utf-8 -*-
+import logging
+from typing import Callable
+
+from pymongo.monitoring import TopologyListener as MongoTopologyListener, TopologyOpenedEvent,\
+    TopologyDescriptionChangedEvent, TopologyClosedEvent
+
+from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
+
+
+class TopologyListener(MongoTopologyListener):
+    """ Listener for Mongo cluster topology events. """
+
+    def __init__(self, cluster_object: V1MongoClusterConfiguration,
+                 replica_set_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None:
+        self._cluster_object: V1MongoClusterConfiguration = cluster_object
+        self._replica_set_ready_callback: Callable[[V1MongoClusterConfiguration], None] = replica_set_ready_callback
+
+    def opened(self, event: TopologyOpenedEvent) -> None:
+        """
+        When a topology opened.
+        :param event: The event.
+        """
+        logging.debug("Topology with id {0.topology_id} opened".format(event))
+
+    def description_changed(self, event: TopologyDescriptionChangedEvent) -> None:
+        """
+        When the description of a topology changed.
+        :param event: The event.
+        """
+        logging.debug("Topology description updated for topology id {0.topology_id}".format(event))
+
+        previous_topology_type = event.previous_description.topology_type
+        new_topology_type = event.new_description.topology_type
+        if new_topology_type != previous_topology_type:
+            # topology_type_name was added in PyMongo 3.4
+            logging.debug("Topology {0.topology_id} changed type from {0.previous_description.topology_type_name} to "
+                          "{0.new_description.topology_type_name}".format(event))
+
+        # The has_writable_server and has_readable_server methods were added in PyMongo 3.4.
+        if not event.new_description.has_writable_server():
+            logging.info("No writable servers available.")
+        if not event.new_description.has_readable_server():
+            logging.info("No readable servers available.")
+
+        if not event.new_description.has_writable_server():
+            # We cannot write to a server yet, so we cannot initiate the replica set via the callback.
+            return
+
+        self._replica_set_ready_callback(self._cluster_object)
+
+    def closed(self, event: TopologyClosedEvent) -> None:
+        """
+        When topology was closed.
+        :param event: The event.
+        """
+        logging.debug("Topology with id {0.topology_id} closed".format(event))
+
diff --git a/mongoOperator/helpers/listeners/mongo/__init__.py b/mongoOperator/helpers/listeners/mongo/__init__.py
new file mode 100644
index 0000000..97b8d1e
--- /dev/null
+++ b/mongoOperator/helpers/listeners/mongo/__init__.py
@@ -0,0 +1 @@
+# Copyright (c) 2018 Ultimaker
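TopologyListener's callback is gated on `has_writable_server()`, which for a replica set only becomes true once a primary has been elected. A condensed sketch of that gate, assuming PyMongo 3.4+ (illustrative, not the class above verbatim):

    from pymongo import monitoring

    class ReadyNotifier(monitoring.TopologyListener):
        """Calls on_ready once the topology gains a writable server (a primary)."""
        def __init__(self, on_ready):
            self._on_ready = on_ready
        def opened(self, event):
            pass
        def closed(self, event):
            pass
        def description_changed(self, event):
            if event.new_description.has_writable_server():
                self._on_ready()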
diff --git a/mongoOperator/services/KubernetesService.py b/mongoOperator/services/KubernetesService.py
index d1ffcd8..c7362ab 100644
--- a/mongoOperator/services/KubernetesService.py
+++ b/mongoOperator/services/KubernetesService.py
@@ -140,7 +140,6 @@ def createSecret(self, secret_name: str, namespace: str, secret_data: Dict[str,
         :param labels: Optional labels for this secret, defaults to the default labels (see `cls.createDefaultLabels`).
         :return: The secret if successful, None otherwise.
         """
-        # Create the secret object.
         secret_body = KubernetesResources.createSecret(secret_name, namespace, secret_data, labels)
         logging.info("Creating secret %s in namespace %s", secret_name, namespace)
         with IgnoreIfExists():
diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py
index d406fb3..3bf1e62 100644
--- a/mongoOperator/services/MongoService.py
+++ b/mongoOperator/services/MongoService.py
@@ -2,92 +2,80 @@
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
 import logging
-from typing import Dict
-
 from time import sleep
+from typing import Dict, Optional, List
+
+from pymongo import MongoClient
+from pymongo.errors import ConnectionFailure, OperationFailure

 from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker
 from mongoOperator.helpers.MongoResources import MongoResources
 from mongoOperator.helpers.RestoreHelper import RestoreHelper
-from mongoOperator.helpers.MongoMonitoring import CommandLogger, TopologyLogger, ServerLogger, HeartbeatLogger
-
+from mongoOperator.helpers.listeners.mongo.CommandLogger import CommandLogger
+from mongoOperator.helpers.listeners.mongo.HeartbeatListener import HeartbeatListener
+from mongoOperator.helpers.listeners.mongo.ServerLogger import ServerLogger
+from mongoOperator.helpers.listeners.mongo.TopologyListener import TopologyListener
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
 from mongoOperator.services.KubernetesService import KubernetesService

-from pymongo import MongoClient
-from pymongo.errors import ConnectionFailure, OperationFailure
-

 class MongoService:
-    """
-    Bundled methods for interacting with MongoDB.
-    """
+    """ Bundled methods for interacting with MongoDB. """
+
+    # name of the container
     CONTAINER = "mongodb"
+    NO_REPLICA_SET_RESPONSE = "no replset config has been received"

     # after creating a new object definition we can get handshake failures.
     # below we can configure how many times we retry and how long we wait in between.
     MONGO_COMMAND_RETRIES = 4
     MONGO_COMMAND_WAIT = 15.0

-    def __init__(self, kubernetes_service: KubernetesService):
-        self.kubernetes_service = kubernetes_service
-        self.restore_helper = RestoreHelper(self.kubernetes_service)
-        self.mongo_connections = {}
-        self.restores_done = []
-
-    def _onReplicaSetReady(self, cluster_object: V1MongoClusterConfiguration) -> None:
-        if cluster_object.metadata.name not in self.restores_done:
-            # If restore was specified, load restore file
-            self.restore_helper.restoreIfNeeded(cluster_object)
-            self.restores_done.append(cluster_object.metadata.name)
-
-    def _onAllHostsReady(self, cluster_object: V1MongoClusterConfiguration) -> None:
-        self.initializeReplicaSet(cluster_object)
+    def __init__(self, kubernetes_service: KubernetesService) -> None:
+        self._kubernetes_service = kubernetes_service
+        self._restore_helper = RestoreHelper(self._kubernetes_service)
+        self._connected_replica_sets: Dict[str, MongoClient] = {}
+        self._restored_cluster_names: List[str] = []

-    def _mongoAdminCommand(self, cluster_object: V1MongoClusterConfiguration, mongo_command: str, *args, **kwargs) -> Dict[str, any]:
+    def checkOrCreateReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None:
         """
-        Executes the given mongo command inside the pod with the given name.
-        Retries a few times in case we receive a handshake failure.
-        :param name: The name of the cluster.
-        :param namespace: The namespace of the cluster.
-        :param mongo_command: The command to be executed in mongo.
-        :return: The response from MongoDB. See files in `tests/fixtures/mongo_responses` for examples.
-        :raise ValueError: If the result could not be parsed.
-        :raise TimeoutError: If we could not connect after retrying.
+        Checks that the replica set is initialized, or initializes it otherwise.
+        :param cluster_object: The cluster object from the YAML file.
+        :raise ValueError: In case we receive an unexpected response from Mongo.
+        :raise ApiException: In case we receive an unexpected response from Kubernetes.
         """
-        for _ in range(self.MONGO_COMMAND_RETRIES):
-            try:
-                replicaset = cluster_object.metadata.name
-
-                if replicaset not in self.mongo_connections:
-                    self.mongo_connections[replicaset] = MongoClient(
-                        MongoResources.getConnectionSeeds(cluster_object),
-                        connectTimeoutMS=60000,
-                        serverSelectionTimeoutMS=60000,
-                        replicaSet=replicaset,
-                        event_listeners=[CommandLogger(),
-                                         TopologyLogger(cluster_object,
-                                                        replica_set_ready_callback=self._onReplicaSetReady),
-                                         ServerLogger(),
-                                         HeartbeatLogger(cluster_object,
-                                                         all_hosts_ready_callback=self._onAllHostsReady)
-                                         ]
-                    )
-
-                return self.mongo_connections[replicaset].admin.command(mongo_command, *args, **kwargs)
-            except ConnectionFailure as e:
-                logging.error("Exception while trying to connect to Mongo: %s", str(e))
+        cluster_name = cluster_object.metadata.name
+        namespace = cluster_object.metadata.namespace
+        replicas = cluster_object.spec.mongodb.replicas

-            logging.info("Command timed out, waiting %s seconds before trying again (attempt %s/%s)",
-                         self.MONGO_COMMAND_WAIT, _, self.MONGO_COMMAND_RETRIES)
+        create_status_command = MongoResources.createStatusCommand()

-            sleep(self.MONGO_COMMAND_WAIT)
+        try:
+            create_status_response = self._executeAdminCommand(cluster_object, create_status_command)
+            logging.debug("Checking replicas, received %s", repr(create_status_response))
+
+            # The replica set could not be checked
+            if create_status_response["ok"] != 1:
+                raise ValueError("Unexpected response trying to check replicas: '{}'".format(
+                    repr(create_status_response)))

-        raise TimeoutError("Could not execute command after {} retries!".format(self.MONGO_COMMAND_RETRIES))
+            logging.info("The replica set %s @ ns/%s seems to be working properly with %s/%s pods.",
+                         cluster_name, namespace, len(create_status_response["members"]), replicas)

-    def initializeReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None:
+            # The amount of replicas is not the same as configured, we need to fix this
+            if replicas != len(create_status_response["members"]):
+                self._reconfigureReplicaSet(cluster_object)
+
+        except OperationFailure as err:
+            if str(err) != self.NO_REPLICA_SET_RESPONSE:
+                raise
+
+            # If the replica set is not initialized yet, we initialize it
+            self._initializeReplicaSet(cluster_object)
+
+    def createUsers(self, cluster_object: V1MongoClusterConfiguration) -> None:
         """
-        Initializes the replica set by sending an `initiate` command to the 1st Mongo pod.
+        Creates the users required for each of the pods in the replica.
         :param cluster_object: The cluster object from the YAML file.
         :raise ValueError: In case we receive an unexpected response from Mongo.
         :raise ApiException: In case we receive an unexpected response from Kubernetes.
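With this change the whole replica set lifecycle hangs off one admin command: a clean `replSetGetStatus` response means the set exists (and is reconfigured when the member count has drifted), while the specific "no replset config has been received" OperationFailure is the signal to initialize. The control flow, sketched independently of the class (a simplified sketch, not the exact implementation):

    from pymongo.errors import OperationFailure

    NO_REPLICA_SET_RESPONSE = "no replset config has been received"

    def check_or_create(run_status_command, expected_members, initialize, reconfigure):
        """run_status_command issues replSetGetStatus; the callbacks handle each outcome."""
        try:
            status = run_status_command()
            if status["ok"] != 1:
                raise ValueError("Unexpected response trying to check replicas: {!r}".format(status))
            if len(status["members"]) != expected_members:
                reconfigure()
        except OperationFailure as err:
            # Any other operation failure is a real error and is re-raised.
            if str(err) != NO_REPLICA_SET_RESPONSE:
                raise
            initialize()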
@@ -95,19 +83,15 @@ def initializeReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> N cluster_name = cluster_object.metadata.name namespace = cluster_object.metadata.namespace - create_replica_command, create_replica_args = MongoResources.createReplicaInitiateCommand(cluster_object) - conn = MongoClient(MongoResources.getMemberHostname(0, cluster_name, namespace)) - create_replica_response = conn.admin.command(create_replica_command, create_replica_args) - - logging.debug("Initializing replica, received %s", repr(create_replica_response)) - - if create_replica_response["ok"] == 1: - logging.info("Initialized replica set %s @ ns/%s", cluster_name, namespace) - else: - raise ValueError("Unexpected response initializing replica set {} @ ns/{}:\n{}" - .format(cluster_name, namespace, create_replica_response)) + secret_name = AdminSecretChecker.getSecretName(cluster_name) + admin_credentials = self._kubernetes_service.getSecret(secret_name, namespace) + create_admin_command, create_admin_args, create_admin_kwargs = MongoResources.createCreateAdminCommand( + admin_credentials) + create_admin_response = self._executeAdminCommand(cluster_object, create_admin_command, create_admin_args, + **create_admin_kwargs) + logging.info("Created admin user: %s", create_admin_response) - def reconfigureReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None: + def _reconfigureReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None: """ Initializes the replica set by sending a `reconfig` command to the 1st Mongo pod. :param cluster_object: The cluster object from the YAML file. @@ -119,63 +103,101 @@ def reconfigureReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> replicas = cluster_object.spec.mongodb.replicas reconfigure_command, reconfigure_args = MongoResources.createReplicaReconfigureCommand(cluster_object) - reconfigure_response = self._mongoAdminCommand(cluster_object, reconfigure_command, reconfigure_args) + reconfigure_response = self._executeAdminCommand(cluster_object, reconfigure_command, reconfigure_args) logging.debug("Reconfiguring replica, received %s", repr(reconfigure_response)) - if reconfigure_response["ok"] == 1: - logging.info("Reconfigured replica set %s @ ns/%s to %s pods", cluster_name, namespace, replicas) - else: + if reconfigure_response["ok"] != 1: raise ValueError("Unexpected response reconfiguring replica set {} @ ns/{}:\n{}" .format(cluster_name, namespace, reconfigure_response)) - def checkReplicaSetOrInitialize(self, cluster_object: V1MongoClusterConfiguration) -> None: + logging.info("Reconfigured replica set %s @ ns/%s to %s pods", cluster_name, namespace, replicas) + + @staticmethod + def _initializeReplicaSet(cluster_object: V1MongoClusterConfiguration) -> None: """ - Checks that the replica set is initialized, or initializes it otherwise. + Initializes the replica set by sending an `initiate` command to the 1st Mongo pod. :param cluster_object: The cluster object from the YAML file. :raise ValueError: In case we receive an unexpected response from Mongo. :raise ApiException: In case we receive an unexpected response from Kubernetes. 
""" cluster_name = cluster_object.metadata.name namespace = cluster_object.metadata.namespace - replicas = cluster_object.spec.mongodb.replicas - - create_status_command = MongoResources.createStatusCommand() - - try: - create_status_response = self._mongoAdminCommand(cluster_object, create_status_command) - logging.debug("Checking replicas, received %s", repr(create_status_response)) + + master_connection = MongoClient(MongoResources.getMemberHostname(0, cluster_name, namespace)) + create_replica_command, create_replica_args = MongoResources.createReplicaInitiateCommand(cluster_object) + create_replica_response = master_connection.admin.command(create_replica_command, create_replica_args) + + if create_replica_response["ok"] == 1: + logging.info("Initialized replica set %s @ ns/%s", cluster_name, namespace) + return + + logging.error("Initializing replica set failed, received %s", repr(create_replica_response)) + raise ValueError("Unexpected response initializing replica set {} @ ns/{}:\n{}" + .format(cluster_name, namespace, create_replica_response)) + + def _createMongoClientForReplicaSet(self, replica_set_name: str, cluster_object: V1MongoClusterConfiguration + ) -> MongoClient: + """ + Creates a new MongoClient instance for a replica set. + :param replica_set_name: The name of the replica set. + :return: The mongo client. + """ + return MongoClient( + MongoResources.getMemberHostnames(cluster_object), + connectTimeoutMS = 60000, + serverSelectionTimeoutMS = 60000, + replicaSet = replica_set_name, + event_listeners = [ + CommandLogger(), + ServerLogger(), + TopologyListener(cluster_object, replica_set_ready_callback = self._onReplicaSetReady), + HeartbeatListener(cluster_object, all_hosts_ready_callback = self._onAllHostsReady) + ] + ) - if create_status_response["ok"] == 1: - logging.info("The replica set %s @ ns/%s seems to be working properly with %s/%s pods.", - cluster_name, namespace, len(create_status_response["members"]), replicas) - if replicas != len(create_status_response["members"]): - self.reconfigureReplicaSet(cluster_object) - else: - raise ValueError("Unexpected response trying to check replicas: '{}'".format( - repr(create_status_response))) + def _onReplicaSetReady(self, cluster_object: V1MongoClusterConfiguration) -> None: + """ + Callback triggered when a replica set is ready to be operated on. + If a restore is still needed for the given replica set, it will be executed at this stage. + :param cluster_object: The cluster configuration object for the replica set. + """ + if cluster_object.metadata.name in self._restored_cluster_names: + # A restore was already done for this replica set, so we don't have to do anything. + return + self._restore_helper.restoreIfNeeded(cluster_object) + self._restored_cluster_names.append(cluster_object.metadata.name) - except OperationFailure as e: - # If the replica set is not initialized yet, we initialize it - if str(e) == "no replset config has been received": - return self.initializeReplicaSet(cluster_object) - raise + def _onAllHostsReady(self, cluster_object: V1MongoClusterConfiguration) -> None: + """ + Callback triggered when all hosts in the would-be replica set are available. + :param cluster_object: The cluster configuration object for the hosts in the would-be replica set. 
+ """ + self._initializeReplicaSet(cluster_object) - def createUsers(self, cluster_object: V1MongoClusterConfiguration) -> None: + def _executeAdminCommand(self, cluster_object: V1MongoClusterConfiguration, mongo_command: str, *args, **kwargs + ) -> Optional[Dict[str, any]]: """ - Creates the users required for each of the pods in the replica. - :param cluster_object: The cluster object from the YAML file. - :raise ValueError: In case we receive an unexpected response from Mongo. - :raise ApiException: In case we receive an unexpected response from Kubernetes. + Executes the given mongo command inside the pod with the given name. + Retries a few times in case we receive a handshake failure. + :param name: The name of the cluster. + :param namespace: The namespace of the cluster. + :param mongo_command: The command to be executed in mongo. + :return: The response from MongoDB. See files in `tests/fixtures/mongo_responses` for examples. + :raise ValueError: If the result could not be parsed. + :raise TimeoutError: If we could not connect after retrying. """ - cluster_name = cluster_object.metadata.name - namespace = cluster_object.metadata.namespace + for _ in range(self.MONGO_COMMAND_RETRIES): + try: + replica_set_name = cluster_object.metadata.name + if replica_set_name not in self._connected_replica_sets: + self._connected_replica_sets[replica_set_name] = self._createMongoClientForReplicaSet( + replica_set_name, cluster_object) + return self._connected_replica_sets[replica_set_name].admin.command(mongo_command, *args, **kwargs) + except ConnectionFailure as err: + logging.error("Exception while trying to connect to Mongo: %s", str(err)) + logging.info("Command timed out, waiting %s seconds before trying again (attempt %s/%s)", + self.MONGO_COMMAND_WAIT, _, self.MONGO_COMMAND_RETRIES) + sleep(self.MONGO_COMMAND_WAIT) - secret_name = AdminSecretChecker.getSecretName(cluster_name) - admin_credentials = self.kubernetes_service.getSecret(secret_name, namespace) - create_admin_command, create_admin_args, create_admin_kwargs = MongoResources.createCreateAdminCommand( - admin_credentials) - logging.info("Creating admin user.") - create_admin_response = self._mongoAdminCommand(cluster_object, create_admin_command, create_admin_args, - **create_admin_kwargs) - logging.info("Got response: %s", create_admin_response) + raise TimeoutError("Could not execute command after {} retries!".format(self.MONGO_COMMAND_RETRIES)) diff --git a/mongoOperator/services/__init__.py b/mongoOperator/services/__init__.py new file mode 100644 index 0000000..97b8d1e --- /dev/null +++ b/mongoOperator/services/__init__.py @@ -0,0 +1 @@ +# Copyright (c) 2018 Ultimaker diff --git a/tests/helpers/TestCommandLogger.py b/tests/helpers/TestCommandLogger.py new file mode 100644 index 0000000..fb3300e --- /dev/null +++ b/tests/helpers/TestCommandLogger.py @@ -0,0 +1,26 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +from typing import cast +from unittest import TestCase + +from pymongo.monitoring import CommandStartedEvent, CommandFailedEvent, CommandSucceededEvent + +from mongoOperator.helpers.listeners.mongo.CommandLogger import CommandLogger + + +class CommandEventMock: + """ Mock implementation of a CommandEvent. 
""" + command_name = "foo" + request_id = 1 + connection_id = 1 + duration_micros = 10000 + + +class TestCommandLogger(TestCase): + + def test_commandLogger(self): + command_logger = CommandLogger() + command_logger.started(event = cast(CommandStartedEvent, CommandEventMock())) + command_logger.succeeded(event = cast(CommandSucceededEvent, CommandEventMock())) + command_logger.failed(event = cast(CommandFailedEvent, CommandEventMock())) diff --git a/tests/helpers/TestMongoMonitoring.py b/tests/helpers/TestMongoMonitoring.py deleted file mode 100644 index 1bc1b99..0000000 --- a/tests/helpers/TestMongoMonitoring.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) 2018 Ultimaker -# !/usr/bin/env python -# -*- coding: utf-8 -*- - -from mongoOperator.helpers.MongoMonitoring import CommandLogger, TopologyLogger, ServerLogger, HeartbeatLogger - -from unittest import TestCase -from unittest.mock import MagicMock, patch, call - - -class CommandEventMock: - """ - Mock implementation of a CommandEvent. - """ - command_name = "foo" - request_id = 1 - connection_id = 1 - duration_micros = 10000 - - -class ServerDescriptionEventMock: - server_type = "foo" - server_type_name = "foo" - - -class ServerEventMock: - """ - Mock implementation of a ServerEvent. - """ - server_address = "localhost" - topology_id = 1 - previous_description = ServerDescriptionEventMock() - new_description = ServerDescriptionEventMock() - - -class TestRestoreHelper(TestCase): - - def setUp(self): - return - - def test_commandLogger(self): - commandlogger = CommandLogger() - commandlogger.started(event=CommandEventMock()) - commandlogger.succeeded(event=CommandEventMock()) - commandlogger.failed(event=CommandEventMock()) - - def test_serverLogger(self): - serverlogger = ServerLogger() - serverlogger.opened(event=ServerEventMock()) - serverlogger.description_changed(event=ServerEventMock()) diff --git a/tests/helpers/TestServerLogger.py b/tests/helpers/TestServerLogger.py new file mode 100644 index 0000000..9dd86f9 --- /dev/null +++ b/tests/helpers/TestServerLogger.py @@ -0,0 +1,31 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +from typing import cast +from unittest import TestCase + +from pymongo.monitoring import ServerOpeningEvent, ServerClosedEvent, ServerDescriptionChangedEvent + +from mongoOperator.helpers.listeners.mongo.ServerLogger import ServerLogger + + +class ServerDescriptionEventMock: + server_type = "foo" + server_type_name = "foo" + + +class ServerEventMock: + """ Mock implementation of a ServerEvent. 
""" + server_address = "localhost" + topology_id = 1 + previous_description = ServerDescriptionEventMock() + new_description = ServerDescriptionEventMock() + + +class TestServerLogger(TestCase): + + def test_serverLogger(self): + command_logger = ServerLogger() + command_logger.opened(event = cast(ServerOpeningEvent, ServerEventMock())) + command_logger.closed(event = cast(ServerClosedEvent, ServerEventMock())) + command_logger.description_changed(event = cast(ServerDescriptionChangedEvent, ServerEventMock())) From 59a3b4aeb9b7ecf8f5b6ae874a0e00a29a9c3782 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 13 Feb 2019 22:31:26 +0100 Subject: [PATCH 05/36] Refactor --- mongoOperator/helpers/ClusterChecker.py | 40 ++--- mongoOperator/helpers/RestoreHelper.py | 15 +- mongoOperator/services/MongoService.py | 15 +- tests/helpers/TestClusterChecker.py | 30 ++-- tests/helpers/TestRestoreHelper.py | 37 ++--- tests/services/TestMongoService.py | 203 +++++++++++------------- 6 files changed, 156 insertions(+), 184 deletions(-) diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/helpers/ClusterChecker.py index b76b5af..dbc37bb 100644 --- a/mongoOperator/helpers/ClusterChecker.py +++ b/mongoOperator/helpers/ClusterChecker.py @@ -22,14 +22,14 @@ class ClusterChecker: STREAM_REQUEST_TIMEOUT = (15.0, 5.0) # connect, read timeout def __init__(self) -> None: - self.cluster_versions: Dict[Tuple[str, str], str] = { } # format: {(cluster_name, namespace): resource_version} - self.kubernetes_service = KubernetesService() - self.mongo_service = MongoService(self.kubernetes_service) - self.backup_checker = BackupChecker(self.kubernetes_service) - self.resource_checkers: List[BaseResourceChecker] = [ - ServiceChecker(self.kubernetes_service), - StatefulSetChecker(self.kubernetes_service), - AdminSecretChecker(self.kubernetes_service), + self._cluster_versions: Dict[Tuple[str, str], str] = {} # format: {(cluster_name, namespace): resource_version} + self._kubernetes_service = KubernetesService() + self._mongo_service = MongoService(self._kubernetes_service) + self._backup_checker = BackupChecker(self._kubernetes_service) + self._resource_checkers: List[BaseResourceChecker] = [ + ServiceChecker(self._kubernetes_service), + StatefulSetChecker(self._kubernetes_service), + AdminSecretChecker(self._kubernetes_service), ] @staticmethod @@ -53,7 +53,7 @@ def checkExistingClusters(self) -> None: Check all Mongo objects and see if the sub objects are available. If they are not, they should be (re-)created to ensure the cluster is in the expected state. 
""" - mongo_objects = self.kubernetes_service.listMongoObjects() + mongo_objects = self._kubernetes_service.listMongoObjects() logging.info("Checking %s mongo objects.", len(mongo_objects["items"])) for cluster_dict in mongo_objects["items"]: cluster_object = self._parseConfiguration(cluster_dict) @@ -67,10 +67,10 @@ def streamEvents(self) -> None: event_watcher = Watch() # start watching from the latest version that we have - if self.cluster_versions: - event_watcher.resource_version = max(self.cluster_versions.values()) + if self._cluster_versions: + event_watcher.resource_version = max(self._cluster_versions.values()) - for event in event_watcher.stream(self.kubernetes_service.listMongoObjects, + for event in event_watcher.stream(self._kubernetes_service.listMongoObjects, _request_timeout = self.STREAM_REQUEST_TIMEOUT): logging.info("Received event %s", event) @@ -97,7 +97,7 @@ def collectGarbage(self) -> None: """ Cleans up any resources that are left after a cluster has been removed. """ - for checker in self.resource_checkers: + for checker in self._resource_checkers: checker.cleanResources() def checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool = False) -> None: @@ -108,16 +108,16 @@ def checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool """ key = (cluster_object.metadata.name, cluster_object.metadata.namespace) - if self.cluster_versions.get(key) == cluster_object.metadata.resource_version and not force: + if self._cluster_versions.get(key) == cluster_object.metadata.resource_version and not force: logging.debug("Cluster object %s has been checked already in version %s.", key, cluster_object.metadata.resource_version) # we still want to check the replicas to make sure everything is working. - self.mongo_service.checkOrCreateReplicaSet(cluster_object) + self._mongo_service.checkOrCreateReplicaSet(cluster_object) else: - for checker in self.resource_checkers: + for checker in self._resource_checkers: checker.checkResource(cluster_object) - self.mongo_service.checkOrCreateReplicaSet(cluster_object) - self.mongo_service.createUsers(cluster_object) - self.cluster_versions[key] = cluster_object.metadata.resource_version + self._mongo_service.checkOrCreateReplicaSet(cluster_object) + self._mongo_service.createUsers(cluster_object) + self._cluster_versions[key] = cluster_object.metadata.resource_version - self.backup_checker.backupIfNeeded(cluster_object) + self._backup_checker.backupIfNeeded(cluster_object) diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index ea39e27..c7e7de8 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -110,30 +110,27 @@ def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) :param cluster_object: The cluster object from the YAML file. :param backup_file: The filename of the backup we want to restore. 
""" - hosts = MongoResources.getConnectionSeeds(cluster_object) + hostnames = MongoResources.getMemberHostnames(cluster_object) - logging.info("Restoring backup file %s to cluster %s @ ns/%s.", backup_file, - cluster_object.metadata.name, cluster_object.metadata.namespace) + logging.info("Restoring backup file %s to cluster %s @ ns/%s.", backup_file, cluster_object.metadata.name, + cluster_object.metadata.namespace) # Download the backup file from the bucket downloaded_file = self._downloadBackup(cluster_object, backup_file) for _ in range(self.RESTORE_RETRIES): - # Wait for the replicaset to become ready - + # Wait for the replica set to become ready try: - logging.info("Running mongorestore --host %s --gzip --archive=%s", ','.join(hosts), downloaded_file) - restore_output = check_output(["mongorestore", "--host", ','.join(hosts), "--gzip", + logging.info("Running mongorestore --host %s --gzip --archive=%s", ','.join(hostnames), downloaded_file) + restore_output = check_output(["mongorestore", "--host", ','.join(hostnames), "--gzip", "--archive=" + downloaded_file]) logging.info("Restore output: %s", restore_output) os.remove(downloaded_file) return True - except CalledProcessError as err: logging.error("Could not restore '{}', attempt {}. Return code: {} stderr: '{}' stdout: '{}'" .format(backup_file, _, err.returncode, err.stderr, err.stdout)) sleep(self.RESTORE_WAIT) - raise SubprocessError("Could not restore '{}' after {} retries!".format(backup_file, self.RESTORE_RETRIES)) def _downloadBackup(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) -> str: diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 3bf1e62..2a97881 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -136,18 +136,16 @@ def _initializeReplicaSet(cluster_object: V1MongoClusterConfiguration) -> None: raise ValueError("Unexpected response initializing replica set {} @ ns/{}:\n{}" .format(cluster_name, namespace, create_replica_response)) - def _createMongoClientForReplicaSet(self, replica_set_name: str, cluster_object: V1MongoClusterConfiguration - ) -> MongoClient: + def _createMongoClientForReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> MongoClient: """ Creates a new MongoClient instance for a replica set. - :param replica_set_name: The name of the replica set. :return: The mongo client. 
""" return MongoClient( MongoResources.getMemberHostnames(cluster_object), connectTimeoutMS = 60000, serverSelectionTimeoutMS = 60000, - replicaSet = replica_set_name, + replicaSet = cluster_object.metadata.name, event_listeners = [ CommandLogger(), ServerLogger(), @@ -189,11 +187,10 @@ def _executeAdminCommand(self, cluster_object: V1MongoClusterConfiguration, mong """ for _ in range(self.MONGO_COMMAND_RETRIES): try: - replica_set_name = cluster_object.metadata.name - if replica_set_name not in self._connected_replica_sets: - self._connected_replica_sets[replica_set_name] = self._createMongoClientForReplicaSet( - replica_set_name, cluster_object) - return self._connected_replica_sets[replica_set_name].admin.command(mongo_command, *args, **kwargs) + name = cluster_object.metadata.name + if name not in self._connected_replica_sets: + self._connected_replica_sets[name] = self._createMongoClientForReplicaSet(cluster_object) + return self._connected_replica_sets[name].admin.command(mongo_command, *args, **kwargs) except ConnectionFailure as err: logging.error("Exception while trying to connect to Mongo: %s", str(err)) logging.info("Command timed out, waiting %s seconds before trying again (attempt %s/%s)", diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py index 3ddfc4b..fb11cdf 100644 --- a/tests/helpers/TestClusterChecker.py +++ b/tests/helpers/TestClusterChecker.py @@ -5,7 +5,6 @@ from unittest import TestCase from unittest.mock import patch, call from mongoOperator.helpers.ClusterChecker import ClusterChecker -from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from tests.test_utils import getExampleClusterDefinition from bson.json_util import loads @@ -23,15 +22,16 @@ def setUp(self): self.cluster_dict["metadata"]["resourceVersion"] = "100" self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) - def _getMongoFixture(self, name): + @staticmethod + def _getMongoFixture(name): with open("tests/fixtures/mongo_responses/{}.json".format(name), "rb") as f: return loads(f.read()) def test___init__(self): - self.assertEqual(self.kubernetes_service, self.checker.kubernetes_service) - self.assertEqual(self.kubernetes_service, self.checker.mongo_service.kubernetes_service) - self.assertEqual(3, len(self.checker.resource_checkers), self.checker.resource_checkers) - self.assertEqual({}, self.checker.cluster_versions) + self.assertEqual(self.kubernetes_service, self.checker._kubernetes_service) + self.assertEqual(self.kubernetes_service, self.checker._mongo_service._kubernetes_service) + self.assertEqual(3, len(self.checker._resource_checkers), self.checker._resource_checkers) + self.assertEqual({}, self.checker._cluster_versions) def test__parseConfiguration_ok(self): self.assertEqual(self.cluster_object, self.checker._parseConfiguration(self.cluster_dict)) @@ -44,25 +44,25 @@ def test_checkExistingClusters_empty(self): self.checker.checkExistingClusters() expected = [call.listMongoObjects()] self.assertEqual(expected, self.kubernetes_service.mock_calls) - self.assertEqual({}, self.checker.cluster_versions) + self.assertEqual({}, self.checker._cluster_versions) def test_checkExistingClusters_bad_format(self): self.kubernetes_service.listMongoObjects.return_value = {"items": [{"invalid": "object"}]} self.checker.checkExistingClusters() expected = [call.listMongoObjects()] self.assertEqual(expected, self.kubernetes_service.mock_calls) - 
self.assertEqual({}, self.checker.cluster_versions) + self.assertEqual({}, self.checker._cluster_versions) @patch("mongoOperator.services.MongoService.MongoClient") @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") def test_checkExistingClusters(self, backup_mock, mongoclient_mock): # checkCluster will assume cached version - self.checker.cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" + self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" self.kubernetes_service.listMongoObjects.return_value = {"items": [self.cluster_dict]} mongoclient_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") self.checker.checkExistingClusters() self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"}, - self.checker.cluster_versions) + self.checker._cluster_versions) expected = [call.listMongoObjects()] print(repr(self.kubernetes_service.mock_calls)) self.assertEqual(expected, self.kubernetes_service.mock_calls) @@ -106,7 +106,7 @@ def test_streamEvents_delete(self, stream_mock, garbage_mock): @patch("mongoOperator.helpers.ClusterChecker.Watch") def test_streamEvents_bad_cluster(self, watch_mock): - self.checker.cluster_versions[("mongo-cluster", "default")] = "100" + self.checker._cluster_versions[("mongo-cluster", "default")] = "100" stream_mock = watch_mock.return_value.stream stream_mock.return_value = [{"type": "ADDED", "object": {"thisIsNot": "a_cluster"}}] self.checker.streamEvents() @@ -127,12 +127,12 @@ def test_collectGarbage(self, list_mock, clean_mock): @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") def test_checkCluster_same_version(self, backup_mock, mongoclient_mock): # checkCluster will assume cached version - self.checker.cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" + self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" mongoclient_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") self.checker.checkCluster(self.cluster_object) self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"}, - self.checker.cluster_versions) + self.checker._cluster_versions) # expected = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), @@ -149,12 +149,12 @@ def test_checkCluster_same_version(self, backup_mock, mongoclient_mock): @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.checkResource") def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock, mongoclient_mock): admin_mock.return_value = "createUser", "foo", {} - self.checker.cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "50" + self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "50" mongoclient_mock.return_value.admin.command.side_effect = (self._getMongoFixture("replica-status-ok"), self._getMongoFixture("createUser-ok")) self.checker.checkCluster(self.cluster_object) self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"}, - self.checker.cluster_versions) + self.checker._cluster_versions) expected = [call.getSecret('mongo-cluster-admin-credentials', self.cluster_object.metadata.namespace)] self.assertEqual(expected, self.kubernetes_service.mock_calls) self.assertEqual([call(self.cluster_object)] * 3, 
diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py
index 4b8631d..38544a0 100644
--- a/tests/helpers/TestRestoreHelper.py
+++ b/tests/helpers/TestRestoreHelper.py
@@ -3,6 +3,7 @@
 # -*- coding: utf-8 -*-
 import json
 from base64 import b64encode
+from typing import cast
 
 from kubernetes.client import V1Secret
 from subprocess import CalledProcessError, SubprocessError
@@ -12,13 +13,12 @@
 
 from mongoOperator.helpers.RestoreHelper import RestoreHelper
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
+from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinitionWithRestore, getExampleClusterDefinition
 
 
 class MockBlob:
-    """
-    Mock implementation of storage Blob.
-    """
+    """ Mock implementation of storage Blob. """
     name = "somebackupfile.gz"
 
 
@@ -29,15 +29,15 @@ def setUp(self):
         self.cluster_dict = getExampleClusterDefinitionWithRestore()
         self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
         self.kubernetes_service = MagicMock()
-        self.restore_helper = RestoreHelper(self.kubernetes_service)
+        self.restore_helper = RestoreHelper(cast(KubernetesService, self.kubernetes_service))
 
         self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode())
         self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials})
 
         self.expected_cluster_members = [
-            "mongo-cluster-0.mongo-cluster." + self.cluster_object.metadata.namespace + ".svc.cluster.local",
-            "mongo-cluster-1.mongo-cluster." + self.cluster_object.metadata.namespace + ".svc.cluster.local",
-            "mongo-cluster-2.mongo-cluster." + self.cluster_object.metadata.namespace + ".svc.cluster.local"
+            "mongo-cluster-0.mongo-cluster.mongo-operator-cluster.svc.cluster.local",
+            "mongo-cluster-1.mongo-cluster.mongo-operator-cluster.svc.cluster.local",
+            "mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local"
         ]
 
     @patch("mongoOperator.helpers.RestoreHelper.StorageClient")
@@ -49,7 +49,8 @@ def test_restoreIfNeeded(self, restore_mock, gcs_service_mock, storage_mock):
 
         self.restore_helper.restoreIfNeeded(self.cluster_object)
 
-        self.assertEqual([call.getSecret("storage-serviceaccount", self.cluster_object.metadata.namespace)], self.kubernetes_service.mock_calls)
+        self.assertEqual([call.getSecret("storage-serviceaccount", "mongo-operator-cluster")],
+                         self.kubernetes_service.mock_calls)
 
         expected_service_call = call.from_service_account_info({"user": "password"})
         self.assertEqual([expected_service_call], gcs_service_mock.mock_calls)
@@ -74,12 +75,11 @@
     @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials")
     @patch("mongoOperator.helpers.RestoreHelper.check_output")
     def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock):
-        expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\
-                               "-mongo-cluster-2018-02-28_140000.archive.gz"
+        expected_backup_name = "mongodb-backup-mongo-operator-cluster-mongo-cluster-2018-02-28_140000.archive.gz"
 
         self.restore_helper.restore(self.cluster_object, expected_backup_name)
 
-        self.assertEqual([call.getSecret("storage-serviceaccount", self.cluster_object.metadata.namespace)],
+        self.assertEqual([call.getSecret("storage-serviceaccount", "mongo-operator-cluster")],
                          self.kubernetes_service.mock_calls)
 
         subprocess_mock.assert_called_once_with([
@@ -108,16 +108,16 @@ def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock)
     @patch("mongoOperator.helpers.RestoreHelper.check_output")
     def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock):
         subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error")
-        expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\
-                               "-mongo-cluster-2018-02-28_140000.archive.gz"
+        expected_backup_name = "mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz"
 
         with self.assertRaises(SubprocessError) as context:
             self.restore_helper.restore(self.cluster_object, expected_backup_name)
 
-        self.assertEqual("Could not restore "
-                         "'" + expected_backup_name + "' "
-                         "after 4 retries!",
-                         str(context.exception))
-
+        self.assertEqual("Could not restore '" + expected_backup_name + "' after 4 retries!", str(context.exception))
         self.assertEqual(4, subprocess_mock.call_count)
 
     @patch("mongoOperator.helpers.RestoreHelper.check_output")
     def test_restore_gcs_bad_credentials(self, subprocess_mock):
-        expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\
-                               "-mongo-cluster-2018-02-28_140000.archive.gz"
+        expected_backup_name = "mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz"
+
         with self.assertRaises(ValueError) as context:
             self.restore_helper.restore(self.cluster_object, expected_backup_name)
+
         self.assertIn("Service account info was not in the expected format", str(context.exception))
+
         self.assertEqual(0, subprocess_mock.call_count)
diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py
index 6586852..72955db 100644
--- a/tests/services/TestMongoService.py
+++ b/tests/services/TestMongoService.py
@@ -5,10 +5,9 @@
 from base64 import b64encode
 
 from kubernetes.client import V1Secret, V1ObjectMeta
-from kubernetes.client.rest import ApiException
 from typing import Union
 from unittest import TestCase
-from unittest.mock import MagicMock, patch, call
+from unittest.mock import MagicMock, patch
 
 from mongoOperator.helpers.MongoResources import MongoResources
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
@@ -18,16 +17,16 @@
 from bson.json_util import loads
 from pymongo.errors import OperationFailure, ConnectionFailure
 
+
 @patch("mongoOperator.services.MongoService.sleep", MagicMock())
+@patch("mongoOperator.services.MongoService.MongoClient")
 class TestMongoService(TestCase):
     maxDiff = None
 
     def setUp(self):
         super().setUp()
         self.kubernetes_service: Union[MagicMock, KubernetesService] = MagicMock()
-
         self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode())
-
         self.kubernetes_service.getSecret.return_value = V1Secret(
             metadata=V1ObjectMeta(name="mongo-cluster-admin-credentials", namespace="default"),
             data={
@@ -36,7 +35,6 @@ def setUp(self):
                 "json": self.dummy_credentials
             },
         )
-
         self.service = MongoService(self.kubernetes_service)
         self.cluster_dict = getExampleClusterDefinition()
         self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)
@@ -65,12 +63,9 @@ def setUp(self):
             "_id": "mongo-cluster",
             "version": 1,
             "members": [
-                {"_id": 0, "host": "mongo-cluster-0.mongo-cluster." + self.cluster_object.metadata.namespace +
-                    ".svc.cluster.local"},
-                {"_id": 1, "host": "mongo-cluster-1.mongo-cluster." + self.cluster_object.metadata.namespace +
-                    ".svc.cluster.local"},
-                {"_id": 2, "host": "mongo-cluster-2.mongo-cluster." + self.cluster_object.metadata.namespace +
-                    ".svc.cluster.local"}
+                {"_id": 0, "host": "mongo-cluster-0.mongo-cluster.mongo-operator-cluster.svc.cluster.local"},
+                {"_id": 1, "host": "mongo-cluster-1.mongo-cluster.mongo-operator-cluster.svc.cluster.local"},
+                {"_id": 2, "host": "mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local"}
             ]
         }
 
@@ -79,80 +74,81 @@ def setUp(self):
             "roles": [{"role": "root", "db": "admin"}]
         }
 
-    def _getFixture(self, name):
+    @staticmethod
+    def _getFixture(name):
        with open("tests/fixtures/mongo_responses/{}.json".format(name)) as f:
            return loads(f.read())
 
-    @patch("mongoOperator.services.MongoService.MongoClient")
-    def test__mongoAdminCommand(self, mongoclient_mock):
-        mongoclient_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok")
-        result = self.service._mongoAdminCommand(self.cluster_object, "replSetInitiate")
+    def test_mongoAdminCommand(self, mongo_client_mock):
+        mongo_client_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok")
+        result = self.service._executeAdminCommand(self.cluster_object, "replSetInitiate")
         self.assertEqual(self.initiate_ok_response, result)
+
         # expected_calls = [
         #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
         #     call().admin.command('replSetInitiate')
         # ]
         # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
 
-    @patch("mongoOperator.services.MongoService.MongoClient")
-    def test__mongoAdminCommand_NodeNotFound(self, mongoclient_mock):
-        mongoclient_mock.return_value.admin.command.side_effect = OperationFailure("replSetInitiate quorum check failed"
-                                                                                   " because not all proposed set "
-                                                                                   "members responded affirmatively:")
+    def test__mongoAdminCommand_NodeNotFound(self, mongo_client_mock):
+        mongo_client_mock.return_value.admin.command.side_effect = OperationFailure(
+            "replSetInitiate quorum check failed because not all proposed set members responded affirmatively:")
+
         with self.assertRaises(OperationFailure) as ex:
             mongo_command, mongo_args = MongoResources.createReplicaInitiateCommand(self.cluster_object)
-            self.service._mongoAdminCommand(self.cluster_object, mongo_command, mongo_args)
+            self.service._executeAdminCommand(self.cluster_object, mongo_command, mongo_args)
 
         # expected = [
         #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
         #     call().admin.command('replSetInitiate', self.expected_cluster_config)
         # ]
-        # self.assertEqual(expected, mongoclient_mock.mock_calls)
+        # self.assertEqual(expected, mongo_client_mock.mock_calls)
+        self.assertIn("replSetInitiate quorum check failed", str(ex.exception))
 
-    @patch("mongoOperator.services.MongoService.MongoClient")
-    def test__mongoAdminCommand_connect_failed(self, mongoclient_mock):
-        mongoclient_mock.return_value.admin.command.side_effect = (
+    def test__mongoAdminCommand_connect_failed(self, mongo_client_mock):
+        mongo_client_mock.return_value.admin.command.side_effect = (
            ConnectionFailure("connection attempt failed"),
            self._getFixture("initiate-ok")
        )
-        result = self.service._mongoAdminCommand(self.cluster_object, "replSetGetStatus")
+        result = self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus")
        self.assertEqual(self.initiate_ok_response, result)
+
        # expected_calls = 2 * [
        #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
        #     call().admin.command('replSetGetStatus')
        # ]
-        # self.assertEqual(expected_calls, mongoclient_mock.mock_calls)
+        # self.assertEqual(expected_calls, mongo_client_mock.mock_calls)
mongoclient_mock.mock_calls) + # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test__mongoAdminCommand_TimeoutError(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.side_effect = ( + def test__mongoAdminCommand_TimeoutError(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.side_effect = ( ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"), OperationFailure("no replset config has been received") ) + with self.assertRaises(TimeoutError) as context: - self.service._mongoAdminCommand(self.cluster_object, "replSetGetStatus") + self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus") self.assertEqual("Could not execute command after 4 retries!", str(context.exception)) + # expected_calls = 4 * [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), # call().admin.command('replSetGetStatus') # ] - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test__mongoAdminCommand_NoPrimary(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.side_effect = ( + def test__mongoAdminCommand_NoPrimary(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.side_effect = ( ConnectionFailure("No replica set members match selector \"Primary()\""), self._getFixture("initiate-ok"), self._getFixture("initiate-ok") ) - self.service._mongoAdminCommand(self.cluster_object, "replSetGetStatus") + self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus") # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), @@ -166,12 +162,10 @@ def test__mongoAdminCommand_NoPrimary(self, mongoclient_mock): # print(repr(expected_calls)) # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restoreIfNeeded") - def test_initializeReplicaSet(self, restoreifneeded_mock, mongoclient_mock): - mongoclient_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") - - self.service.initializeReplicaSet(self.cluster_object) + def test_initializeReplicaSet(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") + self.service._initializeReplicaSet(self.cluster_object) + # expected_calls = [ # call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name, # self.cluster_object.metadata.namespace)), @@ -180,63 +174,57 @@ def test_initializeReplicaSet(self, restoreifneeded_mock, mongoclient_mock): # # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_initializeReplicaSet_ValueError(self, mongoclient_mock): + def test_initializeReplicaSet_ValueError(self, mongo_client_mock): command_result = self._getFixture("initiate-ok") command_result["ok"] = 2 - mongoclient_mock.return_value.admin.command.return_value = command_result + mongo_client_mock.return_value.admin.command.return_value = command_result + with self.assertRaises(ValueError) as 
context: - self.service.initializeReplicaSet(self.cluster_object) + self.service._initializeReplicaSet(self.cluster_object) self.assertEqual("Unexpected response initializing replica set mongo-cluster @ ns/" + self.cluster_object.metadata.namespace + ":\n" + str(self.initiate_not_found_response), str(context.exception)) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_reconfigureReplicaSet(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") - - self.service.reconfigureReplicaSet(self.cluster_object) + def test_reconfigureReplicaSet(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") + self.service._reconfigureReplicaSet(self.cluster_object) + # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), # call().admin.command('replSetReconfig', self.expected_cluster_config) # ] # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_reconfigureReplicaSet_ValueError(self, mongoclient_mock): + def test_reconfigureReplicaSet_ValueError(self, mongo_client_mock): command_result = self._getFixture("initiate-ok") command_result["ok"] = 2 - mongoclient_mock.return_value.admin.command.return_value = command_result + mongo_client_mock.return_value.admin.command.return_value = command_result with self.assertRaises(ValueError) as context: - self.service.reconfigureReplicaSet(self.cluster_object) + self.service._reconfigureReplicaSet(self.cluster_object) - self.assertEqual("Unexpected response reconfiguring replica set mongo-cluster @ ns/" + - self.cluster_object.metadata.namespace + ":\n" + + self.assertEqual("Unexpected response reconfiguring replica set mongo-cluster @ ns/mongo-operator-cluster:\n" + str(self.initiate_not_found_response), str(context.exception)) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_checkReplicaSetOrInitialize_ok(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") - self.service.checkReplicaSetOrInitialize(self.cluster_object) + def test_checkOrCreateReplicaSet_ok(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") + self.service.checkOrCreateReplicaSet(self.cluster_object) # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), # call().admin.command('replSetGetStatus') # ] - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restoreIfNeeded") - def test_checkReplicaSetOrInitialize_initialize(self, restoreifneeded_mock, mongoclient_mock): - mongoclient_mock.return_value.admin.command.side_effect = ( + def test_checkOrCreateReplicaSet_initialize(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.side_effect = ( OperationFailure("no replset config has been received"), - self._getFixture("initiate-ok")) - - self.service.checkReplicaSetOrInitialize(self.cluster_object) + self._getFixture("initiate-ok") + ) + self.service.checkOrCreateReplicaSet(self.cluster_object) # expected_calls = [ # 
call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), @@ -248,17 +236,14 @@ def test_checkReplicaSetOrInitialize_initialize(self, restoreifneeded_mock, mong # # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_checkReplicaSetOrInitialize_reconfigure(self, mongoclient_mock): + def test_checkOrCreateReplicaSet_reconfigure(self, mongo_client_mock): self.cluster_object.spec.mongodb.replicas = 4 - mongoclient_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") - self.service.checkReplicaSetOrInitialize(self.cluster_object) - - cluster_config = self.expected_cluster_config - cluster_config["members"].append({"_id": 3, "host": "mongo-cluster-3.mongo-cluster." + - self.cluster_object.metadata.namespace + - ".svc.cluster.local"}) - self.expected_cluster_config = cluster_config + mongo_client_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") + self.service.checkOrCreateReplicaSet(self.cluster_object) + self.expected_cluster_config["members"].append({ + "_id": 3, + "host": "mongo-cluster-3.mongo-cluster.mongo-cluster.svc.cluster.local" + }) # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), @@ -267,36 +252,33 @@ def test_checkReplicaSetOrInitialize_reconfigure(self, mongoclient_mock): # call().admin.command('replSetReconfig', self.expected_cluster_config) # ] # - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_checkReplicaSetOrInitialize_ValueError(self, mongoclient_mock): + def test_checkOrCreateReplicaSet_ValueError(self, mongo_client_mock): response = self._getFixture("replica-status-ok") response["ok"] = 2 - - mongoclient_mock.return_value.admin.command.return_value = response + mongo_client_mock.return_value.admin.command.return_value = response with self.assertRaises(ValueError) as context: - self.service.checkReplicaSetOrInitialize(self.cluster_object) + self.service.checkOrCreateReplicaSet(self.cluster_object) # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), # call().admin.command('replSetGetStatus') # ] # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + self.assertIn("Unexpected response trying to check replicas: ", str(context.exception)) - @patch("mongoOperator.services.MongoService.MongoClient") - @patch("mongoOperator.helpers.RestoreHelper.RestoreHelper.restoreIfNeeded") - def test_checkReplicaSetOrInitialize_OperationalFailure(self, restoreifneeded_mock, mongoclient_mock): - badvalue = "BadValue: Unexpected field foo in replica set member configuration for member:" \ + def test_checkOrCreateReplicaSet_OperationalFailure(self, mongo_client_mock): + bad_value = "BadValue: Unexpected field foo in replica set member configuration for member:" \ "{ _id: 0, foo: \"localhost:27017\" }" - mongoclient_mock.return_value.admin.command.side_effect = ( - OperationFailure(badvalue), - OperationFailure(badvalue)) + mongo_client_mock.return_value.admin.command.side_effect = ( + OperationFailure(bad_value), + OperationFailure(bad_value)) with self.assertRaises(OperationFailure) as context: - self.service.checkReplicaSetOrInitialize(self.cluster_object) + 
self.service.checkOrCreateReplicaSet(self.cluster_object) # # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), @@ -304,24 +286,22 @@ def test_checkReplicaSetOrInitialize_OperationalFailure(self, restoreifneeded_mo # ] # # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - self.assertEqual(str(context.exception), badvalue) - - @patch("mongoOperator.services.MongoService.MongoClient") - def test_createUsers_ok(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.return_value = self._getFixture("createUser-ok") + + self.assertEqual(str(context.exception), bad_value) + def test_createUsers_ok(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.return_value = self._getFixture("createUser-ok") self.service.createUsers(self.cluster_object) # expected_calls = [ # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), # call().admin.command("createUser", "root", **self.expected_user_create) # ] - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_createUsers_ValueError(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.side_effect = OperationFailure("\"createUser\" had the wrong type." - " Expected string, found object"), + def test_createUsers_ValueError(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.side_effect = OperationFailure( + "\"createUser\" had the wrong type. Expected string, found object"), with self.assertRaises(OperationFailure) as context: self.service.createUsers(self.cluster_object) @@ -332,14 +312,14 @@ def test_createUsers_ValueError(self, mongoclient_mock): # ] # # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + self.assertEqual("\"createUser\" had the wrong type. 
Expected string, found object", str(context.exception)) - @patch("mongoOperator.services.MongoService.MongoClient") - def test_createUsers_TimeoutError(self, mongoclient_mock): - mongoclient_mock.return_value.admin.command.side_effect = (ConnectionFailure("connection attempt failed"), - ConnectionFailure("connection attempt failed"), - ConnectionFailure("connection attempt failed"), - ConnectionFailure("connection attempt failed")) + def test_createUsers_TimeoutError(self, mongo_client_mock): + mongo_client_mock.return_value.admin.command.side_effect = ( + ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"), + ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed") + ) with self.assertRaises(TimeoutError) as context: self.service.createUsers(self.cluster_object) @@ -349,5 +329,6 @@ def test_createUsers_TimeoutError(self, mongoclient_mock): # call().admin.command("createUser", "root", **self.expected_user_create) # ] # - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) + # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) + self.assertEqual("Could not execute command after 4 retries!", str(context.exception)) From f64d78b2685c6121c93d27612a8ebe510924a55a Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 13 Feb 2019 22:36:28 +0100 Subject: [PATCH 06/36] Split command and server logger tests --- tests/helpers/TestCommandLogger.py | 14 +++++++++----- tests/helpers/TestServerLogger.py | 14 +++++++++----- 2 files changed, 18 insertions(+), 10 deletions(-) diff --git a/tests/helpers/TestCommandLogger.py b/tests/helpers/TestCommandLogger.py index fb3300e..b379fd1 100644 --- a/tests/helpers/TestCommandLogger.py +++ b/tests/helpers/TestCommandLogger.py @@ -18,9 +18,13 @@ class CommandEventMock: class TestCommandLogger(TestCase): + command_logger = CommandLogger() - def test_commandLogger(self): - command_logger = CommandLogger() - command_logger.started(event = cast(CommandStartedEvent, CommandEventMock())) - command_logger.succeeded(event = cast(CommandSucceededEvent, CommandEventMock())) - command_logger.failed(event = cast(CommandFailedEvent, CommandEventMock())) + def test_started(self): + self.command_logger.started(event = cast(CommandStartedEvent, CommandEventMock())) + + def test_succeeded(self): + self.command_logger.succeeded(event = cast(CommandSucceededEvent, CommandEventMock())) + + def test_failed(self): + self.command_logger.failed(event = cast(CommandFailedEvent, CommandEventMock())) diff --git a/tests/helpers/TestServerLogger.py b/tests/helpers/TestServerLogger.py index 9dd86f9..35582ea 100644 --- a/tests/helpers/TestServerLogger.py +++ b/tests/helpers/TestServerLogger.py @@ -23,9 +23,13 @@ class ServerEventMock: class TestServerLogger(TestCase): + server_logger = ServerLogger() - def test_serverLogger(self): - command_logger = ServerLogger() - command_logger.opened(event = cast(ServerOpeningEvent, ServerEventMock())) - command_logger.closed(event = cast(ServerClosedEvent, ServerEventMock())) - command_logger.description_changed(event = cast(ServerDescriptionChangedEvent, ServerEventMock())) + def test_opened(self): + self.server_logger.opened(event = cast(ServerOpeningEvent, ServerEventMock())) + + def test_closed(self): + self.server_logger.closed(event = cast(ServerClosedEvent, ServerEventMock())) + + def test_description_changed(self): + self.server_logger.description_changed(event = cast(ServerDescriptionChangedEvent, ServerEventMock())) From 
From 39fda8218f2365cfa3f1586c91d14d808dfd6a4a Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 22:37:54 +0100
Subject: [PATCH 07/36] Fit strings

---
 .../helpers/listeners/mongo/CommandLogger.py | 17 ++++++-----------
 1 file changed, 6 insertions(+), 11 deletions(-)

diff --git a/mongoOperator/helpers/listeners/mongo/CommandLogger.py b/mongoOperator/helpers/listeners/mongo/CommandLogger.py
index 5da25a3..25e2ba1 100644
--- a/mongoOperator/helpers/listeners/mongo/CommandLogger.py
+++ b/mongoOperator/helpers/listeners/mongo/CommandLogger.py
@@ -14,26 +14,21 @@ def started(self, event: CommandStartedEvent) -> None:
         When a command was started.
         :param event: The event.
         """
-        logging.debug("Command {0.command_name} with request id "
-                      "{0.request_id} started on server "
-                      "{0.connection_id}".format(event))
+        logging.debug("Command {0.command_name} with request id {0.request_id} started on server {0.connection_id}"
+                      .format(event))
 
     def succeeded(self, event: CommandSucceededEvent) -> None:
         """
         When a command succeeded.
         :param event: The event.
         """
-        logging.debug("Command {0.command_name} with request id "
-                      "{0.request_id} on server {0.connection_id} "
-                      "succeeded in {0.duration_micros} "
-                      "microseconds".format(event))
+        logging.debug("Command {0.command_name} with request id {0.request_id} on server {0.connection_id} succeeded "
+                      "in {0.duration_micros} microseconds".format(event))
 
     def failed(self, event: CommandFailedEvent) -> None:
         """
         When a command failed.
         :param event: The event.
         """
-        logging.debug("Command {0.command_name} with request id "
-                      "{0.request_id} on server {0.connection_id} "
-                      "failed in {0.duration_micros} "
-                      "microseconds".format(event))
+        logging.debug("Command {0.command_name} with request id {0.request_id} on server {0.connection_id} failed in "
+                      "{0.duration_micros} microseconds".format(event))

From a916cd80c6fac40e8357baaeb48bb664012139ab Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 22:39:23 +0100
Subject: [PATCH 08/36] Fit another string

---
 mongoOperator/helpers/listeners/mongo/ServerLogger.py | 3 +--
 1 file changed, 1 insertion(+), 2 deletions(-)

diff --git a/mongoOperator/helpers/listeners/mongo/ServerLogger.py b/mongoOperator/helpers/listeners/mongo/ServerLogger.py
index b678b3a..3a134ff 100644
--- a/mongoOperator/helpers/listeners/mongo/ServerLogger.py
+++ b/mongoOperator/helpers/listeners/mongo/ServerLogger.py
@@ -26,8 +26,7 @@ def description_changed(self, event: ServerDescriptionChangedEvent) -> None:
         if new_server_type != previous_server_type:
             # server_type_name was added in PyMongo 3.4
             logging.debug(
-                "Server {0.server_address} changed type from "
-                "{0.previous_description.server_type_name} to "
+                "Server {0.server_address} changed type from {0.previous_description.server_type_name} to "
                 "{0.new_description.server_type_name}".format(event))
 
     def closed(self, event: ServerClosedEvent) -> None:

From 66e28d22236696e8eeea7e2ca99062fb9927007b Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 22:52:14 +0100
Subject: [PATCH 09/36] Some more code fixes

---
 mongoOperator/helpers/ClusterChecker.py | 38 ++++++++++++-------------
 tests/helpers/TestClusterChecker.py     | 23 ++++++++-------
 2 files changed, 31 insertions(+), 30 deletions(-)

diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/helpers/ClusterChecker.py
index dbc37bb..98b043a 100644
--- a/mongoOperator/helpers/ClusterChecker.py
+++ b/mongoOperator/helpers/ClusterChecker.py
@@ -32,22 +32,6 @@ class ClusterChecker:
             AdminSecretChecker(self._kubernetes_service),
         ]
 
-    @staticmethod
-    def _parseConfiguration(cluster_dict: Dict[str, any]) -> Optional[V1MongoClusterConfiguration]:
-        """
-        Tries to parse the given cluster configuration, returning None if the object cannot be parsed.
-        :param cluster_dict: The dictionary containing the configuration.
-        :return: The cluster configuration model, if valid, or None.
-        """
-        try:
-            result = V1MongoClusterConfiguration(**cluster_dict)
-            result.validate()
-            return result
-        except ValueError as err:
-            meta = cluster_dict.get("metadata", {})
-            logging.error("Could not validate cluster configuration for {} @ ns/{}: {}. The cluster will be ignored."
-                          .format(meta.get("name"), meta.get("namespace"), err))
-
     def checkExistingClusters(self) -> None:
         """
         Check all Mongo objects and see if the sub objects are available.
@@ -58,7 +42,7 @@ def checkExistingClusters(self) -> None:
         for cluster_dict in mongo_objects["items"]:
             cluster_object = self._parseConfiguration(cluster_dict)
             if cluster_object:
-                self.checkCluster(cluster_object)
+                self._checkCluster(cluster_object)
 
     def streamEvents(self) -> None:
         """
@@ -77,7 +61,7 @@ def streamEvents(self) -> None:
             if event["type"] in ("ADDED", "MODIFIED"):
                 cluster_object = self._parseConfiguration(event["object"])
                 if cluster_object:
-                    self.checkCluster(cluster_object)
+                    self._checkCluster(cluster_object)
                 else:
                     logging.warning("Could not validate cluster object, stopping event watcher.")
                     event_watcher.stop = True
@@ -100,7 +84,7 @@ def collectGarbage(self) -> None:
         for checker in self._resource_checkers:
             checker.cleanResources()
 
-    def checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool = False) -> None:
+    def _checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool = False) -> None:
         """
         Checks whether the given cluster is configured and updated.
         :param cluster_object: The cluster object from the YAML file.
@@ -121,3 +105,19 @@ def _checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool
             self._cluster_versions[key] = cluster_object.metadata.resource_version
 
         self._backup_checker.backupIfNeeded(cluster_object)
+
+    @staticmethod
+    def _parseConfiguration(cluster_dict: Dict[str, any]) -> Optional[V1MongoClusterConfiguration]:
+        """
+        Tries to parse the given cluster configuration, returning None if the object cannot be parsed.
+        :param cluster_dict: The dictionary containing the configuration.
+        :return: The cluster configuration model, if valid, or None.
+        """
+        try:
+            result = V1MongoClusterConfiguration(**cluster_dict)
+            result.validate()
+            return result
+        except ValueError as err:
+            meta = cluster_dict.get("metadata", { })
+            logging.error("Could not validate cluster configuration for {} @ ns/{}: {}. The cluster will be ignored."
+                          .format(meta.get("name"), meta.get("namespace"), err))
diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py
index fb11cdf..2497049 100644
--- a/tests/helpers/TestClusterChecker.py
+++ b/tests/helpers/TestClusterChecker.py
@@ -55,11 +55,11 @@ def test_checkExistingClusters_bad_format(self):
 
     @patch("mongoOperator.services.MongoService.MongoClient")
     @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded")
-    def test_checkExistingClusters(self, backup_mock, mongoclient_mock):
+    def test_checkExistingClusters(self, backup_mock, mongo_client_mock):
         # checkCluster will assume cached version
         self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100"
         self.kubernetes_service.listMongoObjects.return_value = {"items": [self.cluster_dict]}
-        mongoclient_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok")
+        mongo_client_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok")
         self.checker.checkExistingClusters()
         self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"},
                          self.checker._cluster_versions)
@@ -68,7 +68,7 @@ def test_checkExistingClusters(self, backup_mock, mongo_client_mock):
         self.assertEqual(expected, self.kubernetes_service.mock_calls)
         backup_mock.assert_called_once_with(self.cluster_object)
 
-    @patch("mongoOperator.helpers.ClusterChecker.ClusterChecker.checkCluster")
+    @patch("mongoOperator.helpers.ClusterChecker.ClusterChecker._checkCluster")
     @patch("kubernetes.watch.watch.Watch.stream")
     def test_streamEvents_add_update(self, stream_mock, check_mock):
         updated_cluster = deepcopy(self.cluster_dict)
@@ -106,7 +106,7 @@ def test_streamEvents_delete(self, stream_mock, garbage_mock):
 
     @patch("mongoOperator.helpers.ClusterChecker.Watch")
     def test_streamEvents_bad_cluster(self, watch_mock):
-        self.checker._cluster_versions[("mongo-cluster", "default")] = "100"
+        self.checker._cluster_versions[("mongo-cluster", "default")] = "100"
         stream_mock = watch_mock.return_value.stream
         stream_mock.return_value = [{"type": "ADDED", "object": {"thisIsNot": "a_cluster"}}]
         self.checker.streamEvents()
@@ -127,12 +127,12 @@ def test_collectGarbage(self, list_mock, clean_mock):
 
     @patch("mongoOperator.services.MongoService.MongoClient")
     @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded")
-    def test_checkCluster_same_version(self, backup_mock, mongoclient_mock):
+    def test_checkCluster_same_version(self, backup_mock, mongo_client_mock):
         # checkCluster will assume cached version
         self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100"
-        mongoclient_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok")
+        mongo_client_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok")
 
-        self.checker.checkCluster(self.cluster_object)
+        self.checker._checkCluster(self.cluster_object)
         self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"},
                          self.checker._cluster_versions)
 
@@ -141,21 +141,22 @@ def test_checkCluster_same_version(self, backup_mock, mongoclient_mock):
         # print("actual:", repr(mongoclient_mock.mock_calls))
         # print("expected:", repr(expected))
         # self.assertEqual(expected, mongoclient_mock.mock_calls)
+
         backup_mock.assert_called_once_with(self.cluster_object)
 
     @patch("mongoOperator.services.MongoService.MongoClient")
     @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded")
     @patch("mongoOperator.helpers.MongoResources.MongoResources.createCreateAdminCommand")
     @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.checkResource")
-    def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock, mongoclient_mock):
+    def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock, mongo_client_mock):
         admin_mock.return_value = "createUser", "foo", {}
         self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "50"
-        mongoclient_mock.return_value.admin.command.side_effect = (self._getMongoFixture("replica-status-ok"),
-                                                                   self._getMongoFixture("createUser-ok"))
+        mongo_client_mock.return_value.admin.command.side_effect = (self._getMongoFixture("replica-status-ok"),
+                                                                    self._getMongoFixture("createUser-ok"))
         self.checker._checkCluster(self.cluster_object)
         self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"},
                          self.checker._cluster_versions)
-        expected = [call.getSecret('mongo-cluster-admin-credentials', self.cluster_object.metadata.namespace)]
+        expected = [call.getSecret("mongo-cluster-admin-credentials", self.cluster_object.metadata.namespace)]
         self.assertEqual(expected, self.kubernetes_service.mock_calls)
         self.assertEqual([call(self.cluster_object)] * 3, check_mock.mock_calls)
         backup_mock.assert_called_once_with(self.cluster_object)

From b71546100e8a237b57f3d2289139256246c1c129 Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 22:53:44 +0100
Subject: [PATCH 10/36] Remove k8s stream events code as we don't use it due to instability of that feature

---
 mongoOperator/helpers/ClusterChecker.py | 33 -----------------
 tests/helpers/TestClusterChecker.py     | 47 -------------------------
 2 files changed, 80 deletions(-)

diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/helpers/ClusterChecker.py
index 98b043a..a674be7 100644
--- a/mongoOperator/helpers/ClusterChecker.py
+++ b/mongoOperator/helpers/ClusterChecker.py
@@ -44,39 +44,6 @@ def checkExistingClusters(self) -> None:
         if cluster_object:
             self._checkCluster(cluster_object)
 
-    def streamEvents(self) -> None:
-        """
-        Watches for changes to the mongo objects in Kubernetes and processes any changes immediately.
-        """
-        event_watcher = Watch()
-
-        # start watching from the latest version that we have
-        if self._cluster_versions:
-            event_watcher.resource_version = max(self._cluster_versions.values())
-
-        for event in event_watcher.stream(self._kubernetes_service.listMongoObjects,
-                                          _request_timeout = self.STREAM_REQUEST_TIMEOUT):
-            logging.info("Received event %s", event)
-
-            if event["type"] in ("ADDED", "MODIFIED"):
-                cluster_object = self._parseConfiguration(event["object"])
-                if cluster_object:
-                    self._checkCluster(cluster_object)
-                else:
-                    logging.warning("Could not validate cluster object, stopping event watcher.")
-                    event_watcher.stop = True
-            elif event["type"] in ("DELETED",):
-                self.collectGarbage()
-
-            else:
-                logging.warning("Could not parse event, stopping event watcher.")
-                event_watcher.stop = True
-
-            # Change the resource version manually because of a bug fixed in a later version of the K8s client:
-            # https://github.com/kubernetes-client/python-base/pull/64
-            if isinstance(event.get("object"), dict) and "resourceVersion" in event["object"].get("metadata", {}):
-                event_watcher.resource_version = event["object"]["metadata"]["resourceVersion"]
-
     def collectGarbage(self) -> None:
         """
         Cleans up any resources that are left after a cluster has been removed.
diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py
index 2497049..2bdc4b8 100644
--- a/tests/helpers/TestClusterChecker.py
+++ b/tests/helpers/TestClusterChecker.py
@@ -68,53 +68,6 @@ def test_checkExistingClusters(self, backup_mock, mongo_client_mock):
         self.assertEqual(expected, self.kubernetes_service.mock_calls)
         backup_mock.assert_called_once_with(self.cluster_object)
 
-    @patch("mongoOperator.helpers.ClusterChecker.ClusterChecker._checkCluster")
-    @patch("kubernetes.watch.watch.Watch.stream")
-    def test_streamEvents_add_update(self, stream_mock, check_mock):
-        updated_cluster = deepcopy(self.cluster_dict)
-        updated_cluster["spec"]["mongodb"]["replicas"] = 5
-        updated_cluster["metadata"]["resource_version"] = "200"
-        stream_mock.return_value = [
-            {"type": "ADDED", "object": self.cluster_dict},
-            {"type": "MODIFIED", "object": updated_cluster},
-        ]
-
-        self.checker.streamEvents()
-
-        self.assertEqual([call(self.cluster_object), call(V1MongoClusterConfiguration(**updated_cluster))],
-                         check_mock.mock_calls)
-        stream_mock.assert_called_once_with(self.kubernetes_service.listMongoObjects,
-                                            _request_timeout=self.checker.STREAM_REQUEST_TIMEOUT)
-
-    @patch("mongoOperator.helpers.ClusterChecker.Watch")
-    def test_streamEvents_bad_event(self, watch_mock):
-        stream_mock = watch_mock.return_value.stream
-        stream_mock.return_value = [{"type": "UNKNOWN", "object": self.cluster_dict}]
-        self.checker.streamEvents()
-        stream_mock.assert_called_once_with(self.kubernetes_service.listMongoObjects,
-                                            _request_timeout=self.checker.STREAM_REQUEST_TIMEOUT)
-        self.assertTrue(watch_mock.return_value.stop)
-
-    @patch("mongoOperator.helpers.ClusterChecker.ClusterChecker.collectGarbage")
-    @patch("kubernetes.watch.watch.Watch.stream")
-    def test_streamEvents_delete(self, stream_mock, garbage_mock):
-        stream_mock.return_value = [{"type": "DELETED"}]
-        self.checker.streamEvents()
-        garbage_mock.assert_called_once_with()
-        stream_mock.assert_called_once_with(self.kubernetes_service.listMongoObjects,
-                                            _request_timeout = self.checker.STREAM_REQUEST_TIMEOUT)
-
-    @patch("mongoOperator.helpers.ClusterChecker.Watch")
-    def test_streamEvents_bad_cluster(self, watch_mock):
-        self.checker._cluster_versions[("mongo-cluster", "default")] = "100"
-        stream_mock = watch_mock.return_value.stream
-        stream_mock.return_value = [{"type": "ADDED", "object": {"thisIsNot": "a_cluster"}}]
-        self.checker.streamEvents()
-        stream_mock.assert_called_once_with(self.kubernetes_service.listMongoObjects,
-                                            _request_timeout=self.checker.STREAM_REQUEST_TIMEOUT)
-        self.assertTrue(watch_mock.return_value.stop)
-        self.assertEqual("100", watch_mock.return_value.resource_version)
-
     @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.cleanResources")
     @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.listResources")
     def test_collectGarbage(self, list_mock, clean_mock):

From 1a92f765342a34207ec34176df2f5c200d5beb4d Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 22:54:05 +0100
Subject: [PATCH 11/36] Remove todo

---
 mongoOperator/MongoOperator.py | 2 --
 1 file changed, 2 deletions(-)

diff --git a/mongoOperator/MongoOperator.py b/mongoOperator/MongoOperator.py
index 732379a..aa25f6a 100644
--- a/mongoOperator/MongoOperator.py
+++ b/mongoOperator/MongoOperator.py
@@ -29,10 +29,8 @@ def run_forever(self) -> None:
                 try:
                     checker.checkExistingClusters()
                     checker.collectGarbage()
-                    # TODO: Use checker.streamEvents()
                 except Exception as e:
                     logging.exception(e)
-
                 logging.info("Checks done, waiting %s seconds", self._sleep_per_run)
                 sleep(self._sleep_per_run)
         except KeyboardInterrupt:

From c6ea34918ff3c01c6dd0b966eb8241a5f2d7c144 Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 22:55:09 +0100
Subject: [PATCH 12/36] Remove unneeded brackets

---
 mongoOperator/helpers/ClusterChecker.py | 2 +-
 1 file changed, 1 insertion(+), 1 deletion(-)

diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/helpers/ClusterChecker.py
index a674be7..e4f6c8c 100644
--- a/mongoOperator/helpers/ClusterChecker.py
+++ b/mongoOperator/helpers/ClusterChecker.py
@@ -57,7 +57,7 @@ def _checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool
         :param cluster_object: The cluster object from the YAML file.
         :param force: If this is True, we will re-update the cluster even if it has been checked before.
         """
-        key = (cluster_object.metadata.name, cluster_object.metadata.namespace)
+        key = cluster_object.metadata.name, cluster_object.metadata.namespace
 
         if self._cluster_versions.get(key) == cluster_object.metadata.resource_version and not force:
             logging.debug("Cluster object %s has been checked already in version %s.",

From 4ccfa135fdfcd3a0b6af24bee2582c092b4420d2 Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 23:09:27 +0100
Subject: [PATCH 13/36] More cleanup in tests

---
 mongoOperator/helpers/ClusterChecker.py |  2 +-
 tests/helpers/TestClusterChecker.py     | 15 +++++----------
 2 files changed, 6 insertions(+), 11 deletions(-)

diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/helpers/ClusterChecker.py
index e4f6c8c..0b4fb53 100644
--- a/mongoOperator/helpers/ClusterChecker.py
+++ b/mongoOperator/helpers/ClusterChecker.py
@@ -85,6 +85,6 @@ def _parseConfiguration(cluster_dict: Dict[str, any]) -> Optional[V1MongoCluster
             result.validate()
             return result
         except ValueError as err:
-            meta = cluster_dict.get("metadata", { })
+            meta = cluster_dict.get("metadata", {})
             logging.error("Could not validate cluster configuration for {} @ ns/{}: {}. The cluster will be ignored."
                           .format(meta.get("name"), meta.get("namespace"), err))
diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py
index 2bdc4b8..691f9cc 100644
--- a/tests/helpers/TestClusterChecker.py
+++ b/tests/helpers/TestClusterChecker.py
@@ -64,7 +64,6 @@ def test_checkExistingClusters(self, backup_mock, mongo_client_mock):
         self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"},
                          self.checker._cluster_versions)
         expected = [call.listMongoObjects()]
-        print(repr(self.kubernetes_service.mock_calls))
         self.assertEqual(expected, self.kubernetes_service.mock_calls)
         backup_mock.assert_called_once_with(self.cluster_object)
 
@@ -80,12 +79,10 @@ def test_collectGarbage(self, list_mock, clean_mock):
     @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded")
     def test_checkCluster_same_version(self, backup_mock, mongo_client_mock):
         # checkCluster will assume cached version
-        self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100"
+        self.checker._cluster_versions[("mongo-cluster", "mongo-operator-cluster")] = "100"
         mongo_client_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok")
         self.checker._checkCluster(self.cluster_object)
-        self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"},
-                         self.checker._cluster_versions)
+        self.assertEqual({("mongo-cluster", "mongo-operator-cluster"): "100"}, self.checker._cluster_versions)
 
         # expected = [
         #     call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name),
@@ -103,15 +100,13 @@ def test_checkCluster_same_version(self, backup_mock, mongo_client_mock):
     @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.checkResource")
     def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock, mongo_client_mock):
         admin_mock.return_value = "createUser", "foo", {}
-        self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "50"
+        self.checker._cluster_versions[("mongo-cluster", "mongo-operator-cluster")] = "50"
         mongo_client_mock.return_value.admin.command.side_effect = (self._getMongoFixture("replica-status-ok"),
                                                                     self._getMongoFixture("createUser-ok"))
         self.checker._checkCluster(self.cluster_object)
-        self.assertEqual({("mongo-cluster", self.cluster_object.metadata.namespace): "100"},
-                         self.checker._cluster_versions)
-        expected = [call.getSecret("mongo-cluster-admin-credentials", self.cluster_object.metadata.namespace)]
+        self.assertEqual({("mongo-cluster", "mongo-operator-cluster"): "100"}, self.checker._cluster_versions)
+        expected = [call.getSecret("mongo-cluster-admin-credentials", "mongo-operator-cluster")]
         self.assertEqual(expected, self.kubernetes_service.mock_calls)
         self.assertEqual([call(self.cluster_object)] * 3, check_mock.mock_calls)
         backup_mock.assert_called_once_with(self.cluster_object)
-        self.assertEqual([call(self.kubernetes_service.getSecret())], admin_mock.mock_calls)

From 0a8c3fa538d76cee3717bfdaa114479ba7f39e67 Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 13 Feb 2019 23:11:29 +0100
Subject: [PATCH 14/36] Remove old test code

---
 tests/helpers/TestClusterChecker.py | 12 ------------
 1 file changed, 12 deletions(-)

diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py
index 691f9cc..ff5daa6 100644
--- a/tests/helpers/TestClusterChecker.py
+++ b/tests/helpers/TestClusterChecker.py
@@ -1,7 +1,6 @@
 # Copyright (c) 2018 Ultimaker
 #
!/usr/bin/env python # -*- coding: utf-8 -*- -from copy import deepcopy from unittest import TestCase from unittest.mock import patch, call from mongoOperator.helpers.ClusterChecker import ClusterChecker @@ -56,7 +55,6 @@ def test_checkExistingClusters_bad_format(self): @patch("mongoOperator.services.MongoService.MongoClient") @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") def test_checkExistingClusters(self, backup_mock, mongo_client_mock): - # checkCluster will assume cached version self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" self.kubernetes_service.listMongoObjects.return_value = {"items": [self.cluster_dict]} mongo_client_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") @@ -78,20 +76,10 @@ def test_collectGarbage(self, list_mock, clean_mock): @patch("mongoOperator.services.MongoService.MongoClient") @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") def test_checkCluster_same_version(self, backup_mock, mongo_client_mock): - # checkCluster will assume cached version self.checker._cluster_versions[("mongo-cluster", "mongo-operator-cluster")] = "100" mongo_client_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") self.checker._checkCluster(self.cluster_object) self.assertEqual({("mongo-cluster", "mongo-operator-cluster"): "100"}, self.checker._cluster_versions) - - # expected = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus') - # ] - # print("actual:", repr(mongoclient_mock.mock_calls)) - # print("expected:", repr(expected)) - # self.assertEqual(expected, mongoclient_mock.mock_calls) - backup_mock.assert_called_once_with(self.cluster_object) @patch("mongoOperator.services.MongoService.MongoClient") From 80ee726160df490d445740319d14e0031b15cb38 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 13 Feb 2019 23:15:22 +0100 Subject: [PATCH 15/36] Reformat json example responses --- .../mongo_responses/createUser-ok.json | 25 ++++++++++++++++++- .../fixtures/mongo_responses/initiate-ok.json | 25 ++++++++++++++++++- 2 files changed, 48 insertions(+), 2 deletions(-) diff --git a/tests/fixtures/mongo_responses/createUser-ok.json b/tests/fixtures/mongo_responses/createUser-ok.json index 0897e4e..518a84c 100644 --- a/tests/fixtures/mongo_responses/createUser-ok.json +++ b/tests/fixtures/mongo_responses/createUser-ok.json @@ -1 +1,24 @@ -{"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549962075, "i": 4}}, "$clusterTime": {"clusterTime": {"$timestamp": {"t": 1549962075, "i": 4}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", "$type": "00"}, "keyId": 0}}} \ No newline at end of file +{ + "ok": 1.0, + "operationTime": { + "$timestamp": { + "t": 1549962075, + "i": 4 + } + }, + "$clusterTime": { + "clusterTime": { + "$timestamp": { + "t": 1549962075, + "i": 4 + } + }, + "signature": { + "hash": { + "$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "$type": "00" + }, + "keyId": 0 + } + } +} \ No newline at end of file diff --git a/tests/fixtures/mongo_responses/initiate-ok.json b/tests/fixtures/mongo_responses/initiate-ok.json index b8a4e62..b5ee4db 100644 --- a/tests/fixtures/mongo_responses/initiate-ok.json +++ b/tests/fixtures/mongo_responses/initiate-ok.json @@ -1 +1,24 @@ -{"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": 
{"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", "$type": "00"}, "keyId": 0}}} \ No newline at end of file +{ + "ok": 1.0, + "operationTime": { + "$timestamp": { + "t": 1549963040, + "i": 1 + } + }, + "$clusterTime": { + "clusterTime": { + "$timestamp": { + "t": 1549963040, + "i": 1 + } + }, + "signature": { + "hash": { + "$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + "$type": "00" + }, + "keyId": 0 + } + } +} \ No newline at end of file From b784cbb5d662366248be98a9b3872b782d1425c9 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 13 Feb 2019 23:21:50 +0100 Subject: [PATCH 16/36] Some more code style, add TODO --- tests/helpers/TestAdminSecretChecker.py | 10 +++++++--- tests/helpers/TestBackupChecker.py | 22 +++++++++------------- tests/helpers/TestBaseResourceChecker.py | 12 +++++++----- tests/helpers/TestRestoreHelper.py | 1 + tests/helpers/TestServiceChecker.py | 4 +++- tests/helpers/TestStatefulSetChecker.py | 7 ++++--- 6 files changed, 31 insertions(+), 25 deletions(-) diff --git a/tests/helpers/TestAdminSecretChecker.py b/tests/helpers/TestAdminSecretChecker.py index 7323eb7..1e64455 100644 --- a/tests/helpers/TestAdminSecretChecker.py +++ b/tests/helpers/TestAdminSecretChecker.py @@ -1,11 +1,13 @@ # Copyright (c) 2018 Ultimaker # !/usr/bin/env python # -*- coding: utf-8 -*- +from typing import cast from unittest import TestCase from unittest.mock import MagicMock, patch from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition @@ -14,7 +16,7 @@ class TestAdminSecretChecker(TestCase): def setUp(self): super().setUp() self.kubernetes_service = MagicMock() - self.checker = AdminSecretChecker(self.kubernetes_service) + self.checker = AdminSecretChecker(cast(KubernetesService, self.kubernetes_service)) self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition()) self.secret_name = self.cluster_object.metadata.name + "-admin-credentials" @@ -38,7 +40,8 @@ def test_createResource(self, b64encode_mock): result = self.checker.createResource(self.cluster_object) self.assertEqual(self.kubernetes_service.createSecret.return_value, result) self.kubernetes_service.createSecret.assert_called_once_with( - self.secret_name, self.cluster_object.metadata.namespace, {"username": "root", "password": "random-password"} + self.secret_name, self.cluster_object.metadata.namespace, {"username": "root", + "password": "random-password"} ) @patch("mongoOperator.helpers.AdminSecretChecker.b64encode") @@ -47,7 +50,8 @@ def test_updateResource(self, b64encode_mock): result = self.checker.updateResource(self.cluster_object) self.assertEqual(self.kubernetes_service.updateSecret.return_value, result) self.kubernetes_service.updateSecret.assert_called_once_with( - self.secret_name, self.cluster_object.metadata.namespace, {"username": "root", "password": "random-password"} + self.secret_name, self.cluster_object.metadata.namespace, {"username": "root", + "password": "random-password"} ) def test_deleteResource(self): diff --git a/tests/helpers/TestBackupChecker.py b/tests/helpers/TestBackupChecker.py index f3bf27c..117d690 100644 --- a/tests/helpers/TestBackupChecker.py +++ b/tests/helpers/TestBackupChecker.py @@ -3,6 +3,7 @@ # -*- coding: utf-8 -*- import json from base64 import b64encode +from typing 
import cast from kubernetes.client import V1Secret from subprocess import CalledProcessError, SubprocessError @@ -13,6 +14,7 @@ from mongoOperator.helpers.BackupChecker import BackupChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition @@ -21,7 +23,7 @@ def setUp(self): self.cluster_dict = getExampleClusterDefinition() self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) self.kubernetes_service = MagicMock() - self.checker = BackupChecker(self.kubernetes_service) + self.checker = BackupChecker(cast(KubernetesService, self.kubernetes_service)) self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode()) self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials}) @@ -81,16 +83,15 @@ def test_backupIfNeeded_check_if_needed(self, backup_mock): @patch("mongoOperator.helpers.BackupChecker.check_output") def test_backup(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): current_date = datetime(2018, 2, 28, 14, 0, 0) - expected_backup_name = "mongodb-backup-" + self.cluster_object.metadata.namespace +\ - "-mongo-cluster-2018-02-28_140000.archive.gz" + expected_backup_name = "mongodb-backup-mongo-operator-cluster-mongo-cluster-2018-02-28_140000.archive.gz" self.checker.backup(self.cluster_object, current_date) - self.assertEqual([call.getSecret('storage-serviceaccount', self.cluster_object.metadata.namespace)], self.kubernetes_service.mock_calls) + self.assertEqual([call.getSecret('storage-serviceaccount', 'mongo-operator-cluster')], + self.kubernetes_service.mock_calls) subprocess_mock.assert_called_once_with([ - 'mongodump', '--host', 'mongo-cluster-2.mongo-cluster.' + self.cluster_object.metadata.namespace + - '.svc.cluster.local', '--gzip', + 'mongodump', '--host', 'mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local', '--gzip', '--archive=/tmp/' + expected_backup_name ]) @@ -112,19 +113,15 @@ def test_backup(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): @patch("mongoOperator.helpers.BackupChecker.check_output") def test_backup_mongo_error(self, subprocess_mock): subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") - current_date = datetime(2018, 2, 28, 14, 0, 0) with self.assertRaises(SubprocessError) as context: self.checker.backup(self.cluster_object, current_date) - self.assertEqual("Could not backup 'mongo-cluster-2.mongo-cluster." + self.cluster_object.metadata.namespace + - ".svc.cluster.local' to " - "'/tmp/mongodb-backup-" + self.cluster_object.metadata.namespace + - "-mongo-cluster-2018-02-28_140000.archive.gz'. " + self.assertEqual("Could not backup 'mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local' to " + "'/tmp/mongodb-backup-mongo-operator-cluster-mongo-cluster-2018-02-28_140000.archive.gz'. 
" "Return code: 3\n stderr: 'error'\n stdout: 'output'", str(context.exception)) - self.assertEqual(1, subprocess_mock.call_count) @patch("mongoOperator.helpers.BackupChecker.check_output") @@ -133,5 +130,4 @@ def test_backup_gcs_bad_credentials(self, subprocess_mock): with self.assertRaises(ValueError) as context: self.checker.backup(self.cluster_object, current_date) self.assertIn("Service account info was not in the expected format", str(context.exception)) - self.assertEqual(1, subprocess_mock.call_count) diff --git a/tests/helpers/TestBaseResourceChecker.py b/tests/helpers/TestBaseResourceChecker.py index 3af1713..476c25c 100644 --- a/tests/helpers/TestBaseResourceChecker.py +++ b/tests/helpers/TestBaseResourceChecker.py @@ -1,6 +1,7 @@ # Copyright (c) 2018 Ultimaker # !/usr/bin/env python # -*- coding: utf-8 -*- +from typing import cast from unittest import TestCase from unittest.mock import MagicMock, call @@ -8,6 +9,7 @@ from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition @@ -16,7 +18,7 @@ class TestBaseResourceChecker(TestCase): def setUp(self): self.kubernetes_service = MagicMock() - self.checker = BaseResourceChecker(self.kubernetes_service) + self.checker = BaseResourceChecker(cast(KubernetesService, self.kubernetes_service)) self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition()) def test_getClusterName(self): @@ -56,7 +58,7 @@ def test_cleanResources_found(self): self.kubernetes_service.getMongoObject.return_value = self.cluster_object self.checker.listResources = MagicMock(return_value=[self.cluster_object]) self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', self.cluster_object.metadata.namespace)], + self.assertEqual([call.getMongoObject("mongo-cluster", "mongo-operator-cluster")], self.kubernetes_service.mock_calls) def test_cleanResources_not_found(self): @@ -64,16 +66,16 @@ def test_cleanResources_not_found(self): self.checker.listResources = MagicMock(return_value=[self.cluster_object]) self.checker.deleteResource = MagicMock() self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', self.cluster_object.metadata.namespace)], + self.assertEqual([call.getMongoObject('mongo-cluster', "mongo-operator-cluster")], self.kubernetes_service.mock_calls) - self.checker.deleteResource.assert_called_once_with('mongo-cluster', self.cluster_object.metadata.namespace) + self.checker.deleteResource.assert_called_once_with('mongo-cluster', "mongo-operator-cluster") def test_cleanResources_error(self): self.kubernetes_service.getMongoObject.side_effect = ApiException(400) self.checker.listResources = MagicMock(return_value=[self.cluster_object]) with self.assertRaises(ApiException): self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', self.cluster_object.metadata.namespace)], + self.assertEqual([call.getMongoObject('mongo-cluster', "mongo-operator-cluster")], self.kubernetes_service.mock_calls) def test_listResources(self): diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py index 38544a0..d28349b 100644 --- a/tests/helpers/TestRestoreHelper.py +++ b/tests/helpers/TestRestoreHelper.py @@ -115,6 +115,7 @@ def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mo 
self.assertEqual("Could not restore '" + expected_backup_name + "' after 4 retries!", str(context.exception)) self.assertEqual(4, subprocess_mock.call_count) + # TODO: assert calls on unused mocks @patch("mongoOperator.helpers.RestoreHelper.check_output") def test_restore_gcs_bad_credentials(self, subprocess_mock): diff --git a/tests/helpers/TestServiceChecker.py b/tests/helpers/TestServiceChecker.py index 501992f..33e58cb 100644 --- a/tests/helpers/TestServiceChecker.py +++ b/tests/helpers/TestServiceChecker.py @@ -1,11 +1,13 @@ # Copyright (c) 2018 Ultimaker # !/usr/bin/env python # -*- coding: utf-8 -*- +from typing import cast from unittest import TestCase from unittest.mock import MagicMock from mongoOperator.helpers.ServiceChecker import ServiceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition @@ -14,7 +16,7 @@ class TestServiceChecker(TestCase): def setUp(self): super().setUp() self.kubernetes_service = MagicMock() - self.checker = ServiceChecker(self.kubernetes_service) + self.checker = ServiceChecker(cast(KubernetesService, self.kubernetes_service)) self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition()) def test_listResources(self): diff --git a/tests/helpers/TestStatefulSetChecker.py b/tests/helpers/TestStatefulSetChecker.py index ee08e2d..63dc919 100644 --- a/tests/helpers/TestStatefulSetChecker.py +++ b/tests/helpers/TestStatefulSetChecker.py @@ -1,11 +1,13 @@ # Copyright (c) 2018 Ultimaker # !/usr/bin/env python # -*- coding: utf-8 -*- +from typing import cast from unittest import TestCase from unittest.mock import MagicMock from mongoOperator.helpers.StatefulSetChecker import StatefulSetChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition @@ -14,7 +16,7 @@ class TestStatefulSetChecker(TestCase): def setUp(self): super().setUp() self.kubernetes_service = MagicMock() - self.checker = StatefulSetChecker(self.kubernetes_service) + self.checker = StatefulSetChecker(cast(KubernetesService, self.kubernetes_service)) self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition()) def test_listResources(self): @@ -39,8 +41,7 @@ def test_updateResource(self): self.kubernetes_service.updateStatefulSet.assert_called_once_with(self.cluster_object) def test_deleteResource(self): - result = self.checker.deleteResource(self.cluster_object.metadata.name, - self.cluster_object.metadata.namespace) + result = self.checker.deleteResource(self.cluster_object.metadata.name, self.cluster_object.metadata.namespace) self.assertEqual(self.kubernetes_service.deleteStatefulSet.return_value, result) self.kubernetes_service.deleteStatefulSet.assert_called_once_with( self.cluster_object.metadata.name, self.cluster_object.metadata.namespace From 6bd93b5ee04b1b528ac1d5c32f9da32aba39cb00 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 13 Feb 2019 23:24:25 +0100 Subject: [PATCH 17/36] Remove unused import --- mongoOperator/helpers/ClusterChecker.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/helpers/ClusterChecker.py index 0b4fb53..1da2cba 100644 --- a/mongoOperator/helpers/ClusterChecker.py +++ b/mongoOperator/helpers/ClusterChecker.py @@ -4,8 +4,6 @@ 
import logging from typing import Dict, List, Tuple, Optional -from kubernetes.watch import Watch - from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker from mongoOperator.helpers.BackupChecker import BackupChecker from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker From 67fc2be42a97f1c185860cc8b7e25700f42c29f4 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 13 Feb 2019 23:37:09 +0100 Subject: [PATCH 18/36] move some files around --- .../ClusterChecker.py => ClusterManager.py} | 12 +++++------ mongoOperator/MongoOperator.py | 2 +- .../{BackupChecker.py => BackupHelper.py} | 2 +- mongoOperator/helpers/RestoreHelper.py | 1 - .../AdminSecretChecker.py | 2 +- .../BaseResourceChecker.py | 0 .../{ => resourceCheckers}/ServiceChecker.py | 2 +- .../StatefulSetChecker.py | 2 +- .../helpers/resourceCheckers/__init__.py | 0 mongoOperator/services/MongoService.py | 2 +- tests/helpers/TestAdminSecretChecker.py | 6 +++--- tests/helpers/TestBackupChecker.py | 20 +++++++++---------- tests/helpers/TestBaseResourceChecker.py | 2 +- tests/helpers/TestClusterChecker.py | 16 +++++++-------- tests/helpers/TestServiceChecker.py | 2 +- tests/helpers/TestStatefulSetChecker.py | 2 +- 16 files changed, 36 insertions(+), 37 deletions(-) rename mongoOperator/{helpers/ClusterChecker.py => ClusterManager.py} (89%) rename mongoOperator/helpers/{BackupChecker.py => BackupHelper.py} (99%) rename mongoOperator/helpers/{ => resourceCheckers}/AdminSecretChecker.py (96%) rename mongoOperator/helpers/{ => resourceCheckers}/BaseResourceChecker.py (100%) rename mongoOperator/helpers/{ => resourceCheckers}/ServiceChecker.py (93%) rename mongoOperator/helpers/{ => resourceCheckers}/StatefulSetChecker.py (93%) create mode 100644 mongoOperator/helpers/resourceCheckers/__init__.py diff --git a/mongoOperator/helpers/ClusterChecker.py b/mongoOperator/ClusterManager.py similarity index 89% rename from mongoOperator/helpers/ClusterChecker.py rename to mongoOperator/ClusterManager.py index 1da2cba..dae5a82 100644 --- a/mongoOperator/helpers/ClusterChecker.py +++ b/mongoOperator/ClusterManager.py @@ -4,11 +4,11 @@ import logging from typing import Dict, List, Tuple, Optional -from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker -from mongoOperator.helpers.BackupChecker import BackupChecker -from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker -from mongoOperator.helpers.ServiceChecker import ServiceChecker -from mongoOperator.helpers.StatefulSetChecker import StatefulSetChecker +from mongoOperator.helpers.resourceCheckers.AdminSecretChecker import AdminSecretChecker +from mongoOperator.helpers.BackupHelper import BackupHelper +from mongoOperator.helpers.resourceCheckers.BaseResourceChecker import BaseResourceChecker +from mongoOperator.helpers.resourceCheckers.ServiceChecker import ServiceChecker +from mongoOperator.helpers.resourceCheckers.StatefulSetChecker import StatefulSetChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService from mongoOperator.services.MongoService import MongoService @@ -23,7 +23,7 @@ def __init__(self) -> None: self._cluster_versions: Dict[Tuple[str, str], str] = {} # format: {(cluster_name, namespace): resource_version} self._kubernetes_service = KubernetesService() self._mongo_service = MongoService(self._kubernetes_service) - self._backup_checker = BackupChecker(self._kubernetes_service) + self._backup_checker = 
BackupHelper(self._kubernetes_service) self._resource_checkers: List[BaseResourceChecker] = [ ServiceChecker(self._kubernetes_service), StatefulSetChecker(self._kubernetes_service), diff --git a/mongoOperator/MongoOperator.py b/mongoOperator/MongoOperator.py index aa25f6a..301a95b 100644 --- a/mongoOperator/MongoOperator.py +++ b/mongoOperator/MongoOperator.py @@ -4,7 +4,7 @@ import logging from time import sleep -from mongoOperator.helpers.ClusterChecker import ClusterChecker +from mongoOperator.ClusterManager import ClusterChecker class MongoOperator: diff --git a/mongoOperator/helpers/BackupChecker.py b/mongoOperator/helpers/BackupHelper.py similarity index 99% rename from mongoOperator/helpers/BackupChecker.py rename to mongoOperator/helpers/BackupHelper.py index 87df3c4..33432ed 100644 --- a/mongoOperator/helpers/BackupChecker.py +++ b/mongoOperator/helpers/BackupHelper.py @@ -18,7 +18,7 @@ from mongoOperator.services.KubernetesService import KubernetesService -class BackupChecker: +class BackupHelper: """ Class responsible for handling the Backups for the Mongo cluster. """ diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index c7e7de8..e82dd8b 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -14,7 +14,6 @@ from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService -# from mongoOperator.services.MongoService import MongoService class RestoreHelper: diff --git a/mongoOperator/helpers/AdminSecretChecker.py b/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py similarity index 96% rename from mongoOperator/helpers/AdminSecretChecker.py rename to mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py index dd20b91..c97f6c0 100644 --- a/mongoOperator/helpers/AdminSecretChecker.py +++ b/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py @@ -7,7 +7,7 @@ from kubernetes.client import V1Secret, V1Status from typing import List, Dict -from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker +from mongoOperator.helpers.resourceCheckers.BaseResourceChecker import BaseResourceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration diff --git a/mongoOperator/helpers/BaseResourceChecker.py b/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py similarity index 100% rename from mongoOperator/helpers/BaseResourceChecker.py rename to mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py diff --git a/mongoOperator/helpers/ServiceChecker.py b/mongoOperator/helpers/resourceCheckers/ServiceChecker.py similarity index 93% rename from mongoOperator/helpers/ServiceChecker.py rename to mongoOperator/helpers/resourceCheckers/ServiceChecker.py index 5691fdf..7afe704 100644 --- a/mongoOperator/helpers/ServiceChecker.py +++ b/mongoOperator/helpers/resourceCheckers/ServiceChecker.py @@ -5,7 +5,7 @@ from kubernetes.client import V1Service, V1Status -from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker +from mongoOperator.helpers.resourceCheckers.BaseResourceChecker import BaseResourceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration diff --git a/mongoOperator/helpers/StatefulSetChecker.py b/mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py similarity index 93% rename from 
mongoOperator/helpers/StatefulSetChecker.py rename to mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py index 8df8cca..cbf93c8 100644 --- a/mongoOperator/helpers/StatefulSetChecker.py +++ b/mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py @@ -5,7 +5,7 @@ from kubernetes.client import V1StatefulSet, V1Status -from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker +from mongoOperator.helpers.resourceCheckers.BaseResourceChecker import BaseResourceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration diff --git a/mongoOperator/helpers/resourceCheckers/__init__.py b/mongoOperator/helpers/resourceCheckers/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 2a97881..a38ff95 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -8,7 +8,7 @@ from pymongo import MongoClient from pymongo.errors import ConnectionFailure, OperationFailure -from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker +from mongoOperator.helpers.resourceCheckers.AdminSecretChecker import AdminSecretChecker from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.helpers.RestoreHelper import RestoreHelper from mongoOperator.helpers.listeners.mongo.CommandLogger import CommandLogger diff --git a/tests/helpers/TestAdminSecretChecker.py b/tests/helpers/TestAdminSecretChecker.py index 1e64455..77edacf 100644 --- a/tests/helpers/TestAdminSecretChecker.py +++ b/tests/helpers/TestAdminSecretChecker.py @@ -5,7 +5,7 @@ from unittest import TestCase from unittest.mock import MagicMock, patch -from mongoOperator.helpers.AdminSecretChecker import AdminSecretChecker +from mongoOperator.helpers.resourceCheckers.AdminSecretChecker import AdminSecretChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition @@ -34,7 +34,7 @@ def test_getResource(self): self.cluster_object.metadata.namespace) self.assertEqual(self.kubernetes_service.getSecret.return_value, result) - @patch("mongoOperator.helpers.AdminSecretChecker.b64encode") + @patch("mongoOperator.helpers.resourceCheckers.AdminSecretChecker.b64encode") def test_createResource(self, b64encode_mock): b64encode_mock.return_value = b"random-password" result = self.checker.createResource(self.cluster_object) @@ -44,7 +44,7 @@ def test_createResource(self, b64encode_mock): "password": "random-password"} ) - @patch("mongoOperator.helpers.AdminSecretChecker.b64encode") + @patch("mongoOperator.helpers.resourceCheckers.AdminSecretChecker.b64encode") def test_updateResource(self, b64encode_mock): b64encode_mock.return_value = b"random-password" result = self.checker.updateResource(self.cluster_object) diff --git a/tests/helpers/TestBackupChecker.py b/tests/helpers/TestBackupChecker.py index 117d690..e341f3c 100644 --- a/tests/helpers/TestBackupChecker.py +++ b/tests/helpers/TestBackupChecker.py @@ -12,7 +12,7 @@ from unittest import TestCase from unittest.mock import MagicMock, patch, call -from mongoOperator.helpers.BackupChecker import BackupChecker +from mongoOperator.helpers.BackupHelper import BackupHelper from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService 
from tests.test_utils import getExampleClusterDefinition @@ -23,7 +23,7 @@ def setUp(self): self.cluster_dict = getExampleClusterDefinition() self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) self.kubernetes_service = MagicMock() - self.checker = BackupChecker(cast(KubernetesService, self.kubernetes_service)) + self.checker = BackupHelper(cast(KubernetesService, self.kubernetes_service)) self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode()) self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials}) @@ -34,7 +34,7 @@ def test__utcNow(self): after = datetime.utcnow() self.assertTrue(before <= actual <= after) - @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backup") + @patch("mongoOperator.helpers.BackupHelper.BackupHelper.backup") def test_backupIfNeeded_check_if_needed(self, backup_mock): # this backup is executed every hour at 0 minutes. self.assertEqual("0 * * * *", self.cluster_object.spec.backups.cron) @@ -44,7 +44,7 @@ def test_backupIfNeeded_check_if_needed(self, backup_mock): expected_calls = [] current_date = datetime(2018, 2, 28, 12, 30, 0) - with patch("mongoOperator.helpers.BackupChecker.BackupChecker._utcNow", lambda _: current_date): + with patch("mongoOperator.helpers.BackupHelper.BackupHelper._utcNow", lambda _: current_date): # on the first run, it should backup regardless of the time. self.checker.backupIfNeeded(self.cluster_object) expected_calls.append(call(self.cluster_object, current_date)) @@ -77,10 +77,10 @@ def test_backupIfNeeded_check_if_needed(self, backup_mock): self.assertEqual(expected_calls, backup_mock.mock_calls) self.assertEqual({key: current_date}, self.checker._last_backups) - @patch("mongoOperator.helpers.BackupChecker.os") - @patch("mongoOperator.helpers.BackupChecker.StorageClient") - @patch("mongoOperator.helpers.BackupChecker.ServiceCredentials") - @patch("mongoOperator.helpers.BackupChecker.check_output") + @patch("mongoOperator.helpers.BackupHelper.os") + @patch("mongoOperator.helpers.BackupHelper.StorageClient") + @patch("mongoOperator.helpers.BackupHelper.ServiceCredentials") + @patch("mongoOperator.helpers.BackupHelper.check_output") def test_backup(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): current_date = datetime(2018, 2, 28, 14, 0, 0) expected_backup_name = "mongodb-backup-mongo-operator-cluster-mongo-cluster-2018-02-28_140000.archive.gz" @@ -110,7 +110,7 @@ def test_backup(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): expected_os_call = call.remove('/tmp/' + expected_backup_name) self.assertEqual([expected_os_call], os_mock.mock_calls) - @patch("mongoOperator.helpers.BackupChecker.check_output") + @patch("mongoOperator.helpers.BackupHelper.check_output") def test_backup_mongo_error(self, subprocess_mock): subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") current_date = datetime(2018, 2, 28, 14, 0, 0) @@ -124,7 +124,7 @@ def test_backup_mongo_error(self, subprocess_mock): str(context.exception)) self.assertEqual(1, subprocess_mock.call_count) - @patch("mongoOperator.helpers.BackupChecker.check_output") + @patch("mongoOperator.helpers.BackupHelper.check_output") def test_backup_gcs_bad_credentials(self, subprocess_mock): current_date = datetime(2018, 2, 28, 14, 0, 0) with self.assertRaises(ValueError) as context: diff --git a/tests/helpers/TestBaseResourceChecker.py b/tests/helpers/TestBaseResourceChecker.py index 476c25c..6272162 100644 --- 
a/tests/helpers/TestBaseResourceChecker.py +++ b/tests/helpers/TestBaseResourceChecker.py @@ -7,7 +7,7 @@ from kubernetes.client.rest import ApiException -from mongoOperator.helpers.BaseResourceChecker import BaseResourceChecker +from mongoOperator.helpers.resourceCheckers.BaseResourceChecker import BaseResourceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py index ff5daa6..91a5ac0 100644 --- a/tests/helpers/TestClusterChecker.py +++ b/tests/helpers/TestClusterChecker.py @@ -3,7 +3,7 @@ # -*- coding: utf-8 -*- from unittest import TestCase from unittest.mock import patch, call -from mongoOperator.helpers.ClusterChecker import ClusterChecker +from mongoOperator.ClusterManager import ClusterChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from tests.test_utils import getExampleClusterDefinition from bson.json_util import loads @@ -14,7 +14,7 @@ class TestClusterChecker(TestCase): def setUp(self): super().setUp() - with patch("mongoOperator.helpers.ClusterChecker.KubernetesService") as ks: + with patch("mongoOperator.ClusterManager.KubernetesService") as ks: self.checker = ClusterChecker() self.kubernetes_service = ks.return_value self.cluster_dict = getExampleClusterDefinition() @@ -53,7 +53,7 @@ def test_checkExistingClusters_bad_format(self): self.assertEqual({}, self.checker._cluster_versions) @patch("mongoOperator.services.MongoService.MongoClient") - @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") + @patch("mongoOperator.helpers.BackupHelper.BackupHelper.backupIfNeeded") def test_checkExistingClusters(self, backup_mock, mongo_client_mock): self.checker._cluster_versions[("mongo-cluster", self.cluster_object.metadata.namespace)] = "100" self.kubernetes_service.listMongoObjects.return_value = {"items": [self.cluster_dict]} @@ -65,8 +65,8 @@ def test_checkExistingClusters(self, backup_mock, mongo_client_mock): self.assertEqual(expected, self.kubernetes_service.mock_calls) backup_mock.assert_called_once_with(self.cluster_object) - @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.cleanResources") - @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.listResources") + @patch("mongoOperator.helpers.resourceCheckers.BaseResourceChecker.BaseResourceChecker.cleanResources") + @patch("mongoOperator.helpers.resourceCheckers.BaseResourceChecker.BaseResourceChecker.listResources") def test_collectGarbage(self, list_mock, clean_mock): list_mock.return_value = [self.cluster_object] self.checker.collectGarbage() @@ -74,7 +74,7 @@ def test_collectGarbage(self, list_mock, clean_mock): self.assertEqual([], self.kubernetes_service.mock_calls) # k8s is not called because we mocked everything @patch("mongoOperator.services.MongoService.MongoClient") - @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") + @patch("mongoOperator.helpers.BackupHelper.BackupHelper.backupIfNeeded") def test_checkCluster_same_version(self, backup_mock, mongo_client_mock): self.checker._cluster_versions[("mongo-cluster", "mongo-operator-cluster")] = "100" mongo_client_mock.return_value.admin.command.return_value = self._getMongoFixture("replica-status-ok") @@ -83,9 +83,9 @@ def test_checkCluster_same_version(self, backup_mock, 
mongo_client_mock): backup_mock.assert_called_once_with(self.cluster_object) @patch("mongoOperator.services.MongoService.MongoClient") - @patch("mongoOperator.helpers.BackupChecker.BackupChecker.backupIfNeeded") + @patch("mongoOperator.helpers.BackupHelper.BackupHelper.backupIfNeeded") @patch("mongoOperator.helpers.MongoResources.MongoResources.createCreateAdminCommand") - @patch("mongoOperator.helpers.BaseResourceChecker.BaseResourceChecker.checkResource") + @patch("mongoOperator.helpers.resourceCheckers.BaseResourceChecker.BaseResourceChecker.checkResource") def test_checkCluster_new_version(self, check_mock, admin_mock, backup_mock, mongo_client_mock): admin_mock.return_value = "createUser", "foo", {} self.checker._cluster_versions[("mongo-cluster", "mongo-operator-cluster")] = "50" diff --git a/tests/helpers/TestServiceChecker.py b/tests/helpers/TestServiceChecker.py index 33e58cb..2272650 100644 --- a/tests/helpers/TestServiceChecker.py +++ b/tests/helpers/TestServiceChecker.py @@ -5,7 +5,7 @@ from unittest import TestCase from unittest.mock import MagicMock -from mongoOperator.helpers.ServiceChecker import ServiceChecker +from mongoOperator.helpers.resourceCheckers.ServiceChecker import ServiceChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition diff --git a/tests/helpers/TestStatefulSetChecker.py b/tests/helpers/TestStatefulSetChecker.py index 63dc919..a81ad83 100644 --- a/tests/helpers/TestStatefulSetChecker.py +++ b/tests/helpers/TestStatefulSetChecker.py @@ -5,7 +5,7 @@ from unittest import TestCase from unittest.mock import MagicMock -from mongoOperator.helpers.StatefulSetChecker import StatefulSetChecker +from mongoOperator.helpers.resourceCheckers.StatefulSetChecker import StatefulSetChecker from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService from tests.test_utils import getExampleClusterDefinition From d4d2a83a3230ef24d6bcec8509fb2972152b525b Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Thu, 14 Feb 2019 14:33:01 +0100 Subject: [PATCH 19/36] Add missing unittests, flake8 and pylint checks --- .flake8 | 25 ++++ .pylintrc | 102 +++++++++++++ Dockerfile | 4 + .../listeners/mongo/HeartbeatListener.py | 4 +- mongoOperator/services/KubernetesService.py | 1 - mongoOperator/services/MongoService.py | 4 +- requirements-testing.txt | 4 +- tests/helpers/TestHeartBeatListener.py | 64 ++++++++ tests/helpers/TestServerLogger.py | 8 +- tests/helpers/TestTopologyListener.py | 55 +++++++ tests/services/TestMongoService.py | 138 ++++-------------- 11 files changed, 290 insertions(+), 119 deletions(-) create mode 100644 .flake8 create mode 100644 .pylintrc create mode 100644 tests/helpers/TestHeartBeatListener.py create mode 100644 tests/helpers/TestTopologyListener.py diff --git a/.flake8 b/.flake8 new file mode 100644 index 0000000..5ba7a7e --- /dev/null +++ b/.flake8 @@ -0,0 +1,25 @@ +# Copyright (c) 2018 Ultimaker B.V. +# This file contains the flake8 rules used in the stardust projects. + +# To configure Flake8 as an external tool in PyCharm, create a new External Tool with the settings: +# +# Name: Flake8 +# Program: Check with 'which flake8'. 
For example: /home/dschiavini/.local/bin/flake8
+# Arguments: $ContentRoot$/$FileDirRelativeToProjectRoot$ --config=$ContentRoot$/lib/stardustCommons/.flake8 --count
+# Working directory: $ContentRoot$
+# Output filters: $FILE_PATH$:$LINE$:$COLUMN$:.*
+#
+# You can add a keyboard shortcut in the keymap settings. Select the folder you want to check (e.g. the root of the
+# project) before running the external tool.
+#
+# If you find a better way to configure the external tool please edit this file.
+
+[flake8]
+# E251 allows us to add extra spaces in constructors and method calls, i.e. method(value = result).
+# F401 is ignored because it gives false positives for # type comments
+# W503 forbids line breaks before binary operators; we use W504 instead to forbid breaks after the operators
+ignore = E251, F401, W503
+exclude = .git,__pycache__,node_modules,libCharon,venv
+max-complexity = 6
+max-line-length = 120
+inline-quotes = double
\ No newline at end of file
diff --git a/.pylintrc b/.pylintrc
new file mode 100644
index 0000000..c301f38
--- /dev/null
+++ b/.pylintrc
@@ -0,0 +1,102 @@
+# Copyright (c) 2018 Ultimaker B.V.
+# This file contains the Pylint rules used in the stardust projects.
+
+# To configure PyLint as an external tool in PyCharm, create a new External Tool with the settings:
+#
+# Name: PyLint
+# Program: Check with 'which pylint'. For example: /home/dschiavini/.local/bin/pylint
+# Arguments: $FileDirName$ --rcfile=lib/stardustCommons/.pylintrc --msg-template='{abspath}:{line}:{column}:({symbol}):{msg_id}:{msg}'
+# Working directory: $ContentRoot$
+# Output filters: $FILE_PATH$:$LINE$:$COLUMN$:.*
+#
+# You can add a keyboard shortcut in the keymap settings. To run Pylint on a project, select the module
+# you want to check (e.g. stardustAuthServerApi folder) before running the external tool.
+#
+# If you find a better way to configure the external tool please edit this file.
+
+[MASTER]
+# List of plugins (as comma separated values of python modules names) to load,
+# usually to register additional checkers.
+load-plugins=pylint_quotes
+
+# We expect double string quotes
+string-quote=double-avoid-escape
+
+# When enabled, pylint would attempt to guess common misconfiguration and emit
+# user-friendly hints instead of false-positive error messages.
+suggestion-mode=yes
+
+# Add files or directories to the blacklist. They should be base names, not paths.
+ignore=node_modules,tests
+
+[REFACTORING]
+# Maximum number of nested blocks for function / method body
+max-nested-blocks=3
+
+[MESSAGES CONTROL]
+# C0326: No space allowed around keyword argument assignment
+# C0111: Missing module docstring (missing-docstring)
+# C0411: Ignore import order because the rules are different than in PyCharm, so automatic imports break lots of builds
+# R0201: Method could be a function (no-self-use)
+# R0401: Cyclic imports (cyclic-import) are used for typing
+# R0801: Unfortunately the error is triggered for a lot of similar models (duplicate-code)
+# R1710: Either all return statements in a function should return an expression, or none of them should.
+# W0221: Parameters differ from overridden method (tornado http methods have a flexible number of parameters)
+# W0511: Ignore warnings generated for TODOs in the code
+disable=C0326,C0411,C0111,R0201,R0401,R0801,R1710,W0221,W0511
+
+[FORMAT]
+# Maximum number of characters on a single line.
+max-line-length=120
+
+# Maximum number of lines in a module.
+max-module-lines=500
+
+good-names=os
+
+[BASIC]
+# allow modules and functions to use pascalCase
+module-rgx=[a-zA-Z0-9_]+$
+function-rgx=
+method-rgx=([a-z_][a-z0-9_]{2,30}|([a-z_][A-Za-z0-9]{2,30}))$
+
+[DESIGN]
+# Maximum number of arguments for function / method.
+max-args=7
+
+# Maximum number of attributes for a class (see R0902).
+max-attributes=8
+
+# Maximum number of boolean expressions in an if statement.
+max-bool-expr=5
+
+# Maximum number of branch for function / method body.
+max-branches=12
+
+# Maximum number of locals for function / method body.
+max-locals=15
+
+# Maximum number of parents for a class (see R0901).
+max-parents=7
+
+# Maximum number of public methods for a class (see R0904).
+max-public-methods=20
+
+# Maximum number of return / yield for function / method body.
+max-returns=6
+
+# Maximum number of statements in function / method body.
+max-statements=50
+
+# Minimum number of public methods for a class (R0903).
+# We set this to 0 because our models and fields do not have methods.
+min-public-methods=0
+
+[CLASSES]
+defining-attr-methods=__init__,__new__,setUp,initialize
+
+[TYPECHECK]
+ignored-classes=NotImplemented
+
+[VARIABLES]
+dummy-variables-rgx=_+[a-z0-9_]{2,30}
\ No newline at end of file
diff --git a/Dockerfile b/Dockerfile
index 982895f..a3d42aa 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -25,6 +25,10 @@ ADD . .
 RUN ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" -m pytest -vvx && \
     coverage report --skip-covered --show-missing --fail-under=100
 
+# Linting
+RUN flake8 . --count
+RUN pylint mongoOperator --rcfile=.pylintrc
+
 # This is the container build statements that will create the container meant for deployment
 FROM base AS build
 WORKDIR /usr/src/app
diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
index 61837a1..7b206a0 100644
--- a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
+++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
@@ -44,7 +44,7 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None:
             # The callback was already executed so we don't have to again.
             logging.debug("The callback was already executed")
             return
-        
+
         host_count_found = len(list(filter(lambda x: self._hosts[x] == 1, self._hosts)))
         if self._expected_host_count != host_count_found:
             # The amount of returned hosts was different than expected.
@@ -52,7 +52,7 @@
                 host_count_found, self._expected_host_count
             ))
             return
-        
+
         if "info" in event.reply.document and event.reply.document["info"] == self.INVALID_REPLICA_SET_CONFIG:
             # The reply indicated that the replica set config was not correct.
logging.debug("The replica set config was not correct: {}".format(repr(event.reply))) diff --git a/mongoOperator/services/KubernetesService.py b/mongoOperator/services/KubernetesService.py index c7362ab..d389de2 100644 --- a/mongoOperator/services/KubernetesService.py +++ b/mongoOperator/services/KubernetesService.py @@ -6,7 +6,6 @@ from unittest.mock import patch from kubernetes.client.rest import ApiException -from kubernetes.stream import stream from typing import Dict, Optional import yaml diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index a38ff95..4f9ac1b 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -149,8 +149,8 @@ def _createMongoClientForReplicaSet(self, cluster_object: V1MongoClusterConfigur event_listeners = [ CommandLogger(), ServerLogger(), - TopologyListener(cluster_object, replica_set_ready_callback = self._onReplicaSetReady), - HeartbeatListener(cluster_object, all_hosts_ready_callback = self._onAllHostsReady) + TopologyListener(cluster_object, replica_set_ready_callback=self._onReplicaSetReady), + HeartbeatListener(cluster_object, all_hosts_ready_callback=self._onAllHostsReady) ] ) diff --git a/requirements-testing.txt b/requirements-testing.txt index fea764c..4568bc4 100644 --- a/requirements-testing.txt +++ b/requirements-testing.txt @@ -1,2 +1,4 @@ pytest -coverage \ No newline at end of file +coverage +flake8-quotes +pylint-quotes \ No newline at end of file diff --git a/tests/helpers/TestHeartBeatListener.py b/tests/helpers/TestHeartBeatListener.py new file mode 100644 index 0000000..1a5c9d9 --- /dev/null +++ b/tests/helpers/TestHeartBeatListener.py @@ -0,0 +1,64 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +from typing import cast +from unittest import TestCase +from unittest.mock import Mock, MagicMock + +from pymongo.monitoring import ServerHeartbeatStartedEvent, ServerHeartbeatSucceededEvent, ServerHeartbeatFailedEvent +from pymongo.ismaster import IsMaster +from mongoOperator.helpers.listeners.mongo.HeartbeatListener import HeartbeatListener +from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from tests.test_utils import getExampleClusterDefinition + + +class TestHeartbeatLogger(TestCase): + def setUp(self): + self.cluster_dict = getExampleClusterDefinition() + self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) + self.kubernetes_service = MagicMock() + self._onAllHostsReadyCallback = MagicMock() + + def test_started(self): + heartbeat_logger = HeartbeatListener(self.cluster_object, + all_hosts_ready_callback=self._onAllHostsReadyCallback) + + heartbeat_logger.started(event=cast(ServerHeartbeatStartedEvent, Mock(spec=ServerHeartbeatStartedEvent))) + + def test_succeeded(self): + heartbeat_logger = HeartbeatListener(self.cluster_object, + all_hosts_ready_callback=self._onAllHostsReadyCallback) + + # Fake two already successful hosts + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, MagicMock())) + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, MagicMock())) + + heartbeat_event_mock = MagicMock(spec=ServerHeartbeatSucceededEvent) + heartbeat_event_mock.reply.document = {"info": ""} + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock)) + + self._onAllHostsReadyCallback.assert_called_once_with(self.cluster_object) + + def test_succeeded_invalid_replicaSet(self): + heartbeat_logger = 
HeartbeatListener(self.cluster_object, + all_hosts_ready_callback=self._onAllHostsReadyCallback) + + # Fake two already successful hosts + heartbeat_logger._hosts = {"foo": 1, "bar": 1} + + # Call it with invalid replicaSet configuration + heartbeat_event_mock = MagicMock(spec=ServerHeartbeatSucceededEvent) + heartbeat_event_mock.reply.document = {"info": "Does not have a valid replica set config"} + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock)) + + def test_succeeded_already_called(self): + heartbeat_logger = HeartbeatListener(self.cluster_object, + all_hosts_ready_callback=self._onAllHostsReadyCallback) + + heartbeat_logger._callback_executed = True + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, MagicMock())) + + def test_failed(self): + heartbeat_logger = HeartbeatListener(self.cluster_object, + all_hosts_ready_callback=self._onAllHostsReadyCallback) + heartbeat_logger.failed(event=cast(ServerHeartbeatFailedEvent, Mock(spec=ServerHeartbeatFailedEvent))) diff --git a/tests/helpers/TestServerLogger.py b/tests/helpers/TestServerLogger.py index 35582ea..e7d4e97 100644 --- a/tests/helpers/TestServerLogger.py +++ b/tests/helpers/TestServerLogger.py @@ -26,10 +26,12 @@ class TestServerLogger(TestCase): server_logger = ServerLogger() def test_opened(self): - self.server_logger.opened(event = cast(ServerOpeningEvent, ServerEventMock())) + self.server_logger.opened(event=cast(ServerOpeningEvent, ServerEventMock())) def test_closed(self): - self.server_logger.closed(event = cast(ServerClosedEvent, ServerEventMock())) + self.server_logger.closed(event=cast(ServerClosedEvent, ServerEventMock())) def test_description_changed(self): - self.server_logger.description_changed(event = cast(ServerDescriptionChangedEvent, ServerEventMock())) + serverEventMock = ServerEventMock() + serverEventMock.new_description.server_type = "bar" + self.server_logger.description_changed(event=cast(ServerDescriptionChangedEvent, serverEventMock)) diff --git a/tests/helpers/TestTopologyListener.py b/tests/helpers/TestTopologyListener.py new file mode 100644 index 0000000..0d7df96 --- /dev/null +++ b/tests/helpers/TestTopologyListener.py @@ -0,0 +1,55 @@ +# Copyright (c) 2018 Ultimaker +# !/usr/bin/env python +# -*- coding: utf-8 -*- +from typing import cast +from unittest import TestCase +from unittest.mock import Mock, MagicMock + +from pymongo.monitoring import TopologyDescriptionChangedEvent, TopologyOpenedEvent, TopologyClosedEvent +from mongoOperator.helpers.listeners.mongo.TopologyListener import TopologyListener +from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration +from tests.test_utils import getExampleClusterDefinition + + +class TestTopologyLogger(TestCase): + def setUp(self): + self.cluster_dict = getExampleClusterDefinition() + self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict) + self.kubernetes_service = MagicMock() + self._onReplicaSetReadyCallback = MagicMock() + + def test_opened(self): + topology_logger = TopologyListener(self.cluster_object, + replica_set_ready_callback=self._onReplicaSetReadyCallback) + + topology_logger.opened(event=cast(TopologyOpenedEvent, Mock(spec=TopologyOpenedEvent))) + + def test_description_changed(self): + topology_logger = TopologyListener(self.cluster_object, + replica_set_ready_callback=self._onReplicaSetReadyCallback) + + topology_description_changed_event_mock = MagicMock(spec=TopologyDescriptionChangedEvent) + 
topology_description_changed_event_mock.new_description.topology_type = "foo" + topology_description_changed_event_mock.new_description.has_writable_server.return_value = False + topology_description_changed_event_mock.new_description.has_readable_server.return_value = False + topology_logger.description_changed(event=cast(TopologyDescriptionChangedEvent, + topology_description_changed_event_mock)) + + def test_description_changed_with_callback(self): + topology_logger = TopologyListener(self.cluster_object, + replica_set_ready_callback=self._onReplicaSetReadyCallback) + + topology_description_changed_event_mock = MagicMock(spec=TopologyDescriptionChangedEvent) + topology_description_changed_event_mock.new_description.has_writable_server.return_value = True + + topology_logger.description_changed(event=cast(TopologyDescriptionChangedEvent, + topology_description_changed_event_mock)) + + self._onReplicaSetReadyCallback.assert_called_once_with(self.cluster_object) + + def test_closed(self): + topology_logger = TopologyListener(self.cluster_object, + replica_set_ready_callback=self._onReplicaSetReadyCallback) + topology_logger.closed(event=cast(TopologyClosedEvent, Mock(spec=TopologyClosedEvent))) + + diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py index 72955db..b2de40c 100644 --- a/tests/services/TestMongoService.py +++ b/tests/services/TestMongoService.py @@ -7,7 +7,7 @@ from kubernetes.client import V1Secret, V1ObjectMeta from typing import Union from unittest import TestCase -from unittest.mock import MagicMock, patch +from unittest.mock import MagicMock, patch, Mock from mongoOperator.helpers.MongoResources import MongoResources from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration @@ -83,12 +83,6 @@ def test_mongoAdminCommand(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") result = self.service._executeAdminCommand(self.cluster_object, "replSetInitiate") self.assertEqual(self.initiate_ok_response, result) - - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetInitiate') - # ] - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) def test__mongoAdminCommand_NodeNotFound(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.side_effect = OperationFailure( @@ -98,12 +92,6 @@ def test__mongoAdminCommand_NodeNotFound(self, mongo_client_mock): mongo_command, mongo_args = MongoResources.createReplicaInitiateCommand(self.cluster_object) self.service._executeAdminCommand(self.cluster_object, mongo_command, mongo_args) - # expected = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetInitiate', self.expected_cluster_config) - # ] - # self.assertEqual(expected, mongo_client_mock.mock_calls) - self.assertIn("replSetInitiate quorum check failed", str(ex.exception)) def test__mongoAdminCommand_connect_failed(self, mongo_client_mock): @@ -113,12 +101,6 @@ def test__mongoAdminCommand_connect_failed(self, mongo_client_mock): ) result = self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus") self.assertEqual(self.initiate_ok_response, result) - - # expected_calls = 2 * [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # 
call().admin.command('replSetGetStatus') - # ] - # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) def test__mongoAdminCommand_TimeoutError(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.side_effect = ( @@ -133,12 +115,6 @@ def test__mongoAdminCommand_TimeoutError(self, mongo_client_mock): self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus") self.assertEqual("Could not execute command after 4 retries!", str(context.exception)) - - # expected_calls = 4 * [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus') - # ] - # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) def test__mongoAdminCommand_NoPrimary(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.side_effect = ( @@ -150,29 +126,9 @@ def test__mongoAdminCommand_NoPrimary(self, mongo_client_mock): self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus") - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus'), - # call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name, self.cluster_object.metadata.namespace)), - # call().admin.command('replSetInitiate', self.expected_cluster_config), - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus') - # ] - # print(repr(mongoclient_mock.mock_calls)) - # print(repr(expected_calls)) - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - def test_initializeReplicaSet(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") self.service._initializeReplicaSet(self.cluster_object) - - # expected_calls = [ - # call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name, - # self.cluster_object.metadata.namespace)), - # call().admin.command('replSetInitiate', self.expected_cluster_config) - # ] - # - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) def test_initializeReplicaSet_ValueError(self, mongo_client_mock): command_result = self._getFixture("initiate-ok") @@ -190,12 +146,6 @@ def test_initializeReplicaSet_ValueError(self, mongo_client_mock): def test_reconfigureReplicaSet(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.return_value = self._getFixture("initiate-ok") self.service._reconfigureReplicaSet(self.cluster_object) - - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetReconfig', self.expected_cluster_config) - # ] - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) def test_reconfigureReplicaSet_ValueError(self, mongo_client_mock): command_result = self._getFixture("initiate-ok") @@ -213,12 +163,6 @@ def test_checkOrCreateReplicaSet_ok(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") self.service.checkOrCreateReplicaSet(self.cluster_object) - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus') - # ] - # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - def 
test_checkOrCreateReplicaSet_initialize(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.side_effect = ( OperationFailure("no replset config has been received"), @@ -226,16 +170,6 @@ def test_checkOrCreateReplicaSet_initialize(self, mongo_client_mock): ) self.service.checkOrCreateReplicaSet(self.cluster_object) - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus'), - # call(MongoResources.getMemberHostname(0, self.cluster_object.metadata.name, - # self.cluster_object.metadata.namespace)), - # call().admin.command('replSetInitiate', self.expected_cluster_config) - # ] - # - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - def test_checkOrCreateReplicaSet_reconfigure(self, mongo_client_mock): self.cluster_object.spec.mongodb.replicas = 4 mongo_client_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") @@ -245,15 +179,6 @@ def test_checkOrCreateReplicaSet_reconfigure(self, mongo_client_mock): "host": "mongo-cluster-3.mongo-cluster.mongo-cluster.svc.cluster.local" }) - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus'), - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetReconfig', self.expected_cluster_config) - # ] - # - # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - def test_checkOrCreateReplicaSet_ValueError(self, mongo_client_mock): response = self._getFixture("replica-status-ok") response["ok"] = 2 @@ -262,12 +187,6 @@ def test_checkOrCreateReplicaSet_ValueError(self, mongo_client_mock): with self.assertRaises(ValueError) as context: self.service.checkOrCreateReplicaSet(self.cluster_object) - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus') - # ] - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - self.assertIn("Unexpected response trying to check replicas: ", str(context.exception)) def test_checkOrCreateReplicaSet_OperationalFailure(self, mongo_client_mock): @@ -279,26 +198,13 @@ def test_checkOrCreateReplicaSet_OperationalFailure(self, mongo_client_mock): with self.assertRaises(OperationFailure) as context: self.service.checkOrCreateReplicaSet(self.cluster_object) - # - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command('replSetGetStatus'), - # ] - # - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - + self.assertEqual(str(context.exception), bad_value) def test_createUsers_ok(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.return_value = self._getFixture("createUser-ok") self.service.createUsers(self.cluster_object) - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command("createUser", "root", **self.expected_user_create) - # ] - # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - def test_createUsers_ValueError(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.side_effect = OperationFailure( "\"createUser\" had the wrong type. 
Expected string, found object"), @@ -306,13 +212,6 @@ def test_createUsers_ValueError(self, mongo_client_mock): with self.assertRaises(OperationFailure) as context: self.service.createUsers(self.cluster_object) - # expected_calls = [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command("createUser", "root", **self.expected_user_create) - # ] - # - # self.assertEqual(expected_calls, mongoclient_mock.mock_calls) - self.assertEqual("\"createUser\" had the wrong type. Expected string, found object", str(context.exception)) def test_createUsers_TimeoutError(self, mongo_client_mock): @@ -324,11 +223,30 @@ def test_createUsers_TimeoutError(self, mongo_client_mock): with self.assertRaises(TimeoutError) as context: self.service.createUsers(self.cluster_object) - # expected_calls = 4 * [ - # call(MongoResources.getConnectionSeeds(self.cluster_object), replicaSet=self.cluster_object.metadata.name), - # call().admin.command("createUser", "root", **self.expected_user_create) - # ] - # - # self.assertEqual(expected_calls, mongo_client_mock.mock_calls) - self.assertEqual("Could not execute command after 4 retries!", str(context.exception)) + + def test_onReplicaSetReady(self, mongo_client_mock): + self.service._restore_helper.restoreIfNeeded = MagicMock() + + self.service._onReplicaSetReady(self.cluster_object) + + self.service._restore_helper.restoreIfNeeded.assert_called() + mongo_client_mock.assert_not_called() + + def test_onReplicaSetReady_alreadyRestored(self, mongo_client_mock): + self.service._restore_helper.restoreIfNeeded = MagicMock() + self.service._restored_cluster_names.append("mongo-cluster") + + self.service._onReplicaSetReady(self.cluster_object) + + self.service._restore_helper.restoreIfNeeded.assert_not_called() + mongo_client_mock.assert_not_called() + + def test_onAllHostsReady(self, mongo_client_mock): + self.service._initializeReplicaSet = MagicMock() + + self.service._onAllHostsReady(self.cluster_object) + + self.service._initializeReplicaSet.assert_called() + mongo_client_mock.assert_not_called() + From c8589e4c276121d86e4c9d27c83a313c440c1809 Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Thu, 14 Feb 2019 16:58:03 +0100 Subject: [PATCH 20/36] Flake8 and pylint fixes --- Dockerfile | 2 +- Settings.py | 5 +-- main.py | 2 +- mongoOperator/ClusterManager.py | 6 +-- mongoOperator/MongoOperator.py | 5 ++- mongoOperator/helpers/BackupHelper.py | 2 +- mongoOperator/helpers/KubernetesResources.py | 4 +- mongoOperator/helpers/MongoResources.py | 3 +- mongoOperator/helpers/RestoreHelper.py | 15 ++++--- .../helpers/listeners/mongo/CommandLogger.py | 12 +++--- .../listeners/mongo/HeartbeatListener.py | 20 +++++----- .../helpers/listeners/mongo/ServerLogger.py | 10 ++--- .../listeners/mongo/TopologyListener.py | 12 +++--- .../resourceCheckers/AdminSecretChecker.py | 10 ++--- .../resourceCheckers/BaseResourceChecker.py | 21 +++++----- .../resourceCheckers/ServiceChecker.py | 10 ++--- .../resourceCheckers/StatefulSetChecker.py | 10 ++--- mongoOperator/models/BaseModel.py | 4 +- .../V1MongoClusterConfigurationSpecMongoDB.py | 12 +++--- mongoOperator/models/fields.py | 2 +- mongoOperator/services/KubernetesService.py | 29 +++++++------- mongoOperator/services/MongoService.py | 14 +++---- tests/TestMongoOperator.py | 20 +++++++++- tests/helpers/TestBackupChecker.py | 18 ++++----- tests/helpers/TestBaseResourceChecker.py | 6 +-- tests/helpers/TestCommandLogger.py | 6 +-- tests/helpers/TestRestoreHelper.py | 6 
+-- tests/helpers/TestServerLogger.py | 8 ++-- tests/helpers/TestTopologyListener.py | 2 - .../models/TestV1MongoClusterConfiguration.py | 2 +- tests/services/TestKubernetesService.py | 27 +++++++------ tests/services/TestMongoService.py | 39 +++++++++---------- tests/test_utils.py | 2 + 33 files changed, 180 insertions(+), 166 deletions(-) diff --git a/Dockerfile b/Dockerfile index a3d42aa..92f5d32 100644 --- a/Dockerfile +++ b/Dockerfile @@ -27,7 +27,7 @@ RUN ENV_NAME=testing ASYNC_TEST_TIMEOUT=15 coverage run --source="mongoOperator" # Linting RUN flake8 . --count -RUN pylint stardustAuthServerApi --rcfile=lib/stardustCommons/.pylintrc +RUN pylint mongoOperator # This is the container build statements that will create the container meant for deployment FROM base AS build diff --git a/Settings.py b/Settings.py index 97158d2..2b0bf0e 100644 --- a/Settings.py +++ b/Settings.py @@ -3,7 +3,6 @@ # -*- coding: utf-8 -*- import os - STRING_TO_BOOL_DICT = {"True", "true", "yes", "1"} @@ -11,11 +10,11 @@ class Settings: """ Class responsible for keeping the application settings. """ - + # Custom resource (CRD) API config. CUSTOM_OBJECT_API_GROUP = "operators.ultimaker.com" CUSTOM_OBJECT_API_VERSION = "v1" CUSTOM_OBJECT_RESOURCE_PLURAL = "mongos" - + # Kubernetes config. KUBERNETES_SERVICE_DEBUG = os.getenv("KUBERNETES_SERVICE_DEBUG") in STRING_TO_BOOL_DICT diff --git a/main.py b/main.py index 4430543..d36999b 100644 --- a/main.py +++ b/main.py @@ -7,7 +7,7 @@ from mongoOperator.MongoOperator import MongoOperator -if __name__ == '__main__': +if __name__ == "__main__": logging.basicConfig(format="%(asctime)s [%(levelname)s] %(module)s:%(lineno)s: %(message)s", level=os.getenv("LOGGING_LEVEL", "DEBUG")) diff --git a/mongoOperator/ClusterManager.py b/mongoOperator/ClusterManager.py index dae5a82..d1dd941 100644 --- a/mongoOperator/ClusterManager.py +++ b/mongoOperator/ClusterManager.py @@ -56,7 +56,7 @@ def _checkCluster(self, cluster_object: V1MongoClusterConfiguration, force: bool :param force: If this is True, we will re-update the cluster even if it has been checked before. """ key = cluster_object.metadata.name, cluster_object.metadata.namespace - + if self._cluster_versions.get(key) == cluster_object.metadata.resource_version and not force: logging.debug("Cluster object %s has been checked already in version %s.", key, cluster_object.metadata.resource_version) @@ -84,5 +84,5 @@ def _parseConfiguration(cluster_dict: Dict[str, any]) -> Optional[V1MongoCluster return result except ValueError as err: meta = cluster_dict.get("metadata", {}) - logging.error("Could not validate cluster configuration for {} @ ns/{}: {}. The cluster will be ignored." - .format(meta.get("name"), meta.get("namespace"), err)) + logging.error("Could not validate cluster configuration for %s @ ns/%s: %s. 
The cluster will be ignored.", + meta.get("name"), meta.get("namespace"), err) diff --git a/mongoOperator/MongoOperator.py b/mongoOperator/MongoOperator.py index 301a95b..338c94a 100644 --- a/mongoOperator/MongoOperator.py +++ b/mongoOperator/MongoOperator.py @@ -29,8 +29,9 @@ def run_forever(self) -> None: try: checker.checkExistingClusters() checker.collectGarbage() - except Exception as e: - logging.exception(e) + except Exception as global_exception: + logging.exception(global_exception) + raise logging.info("Checks done, waiting %s seconds", self._sleep_per_run) sleep(self._sleep_per_run) except KeyboardInterrupt: diff --git a/mongoOperator/helpers/BackupHelper.py b/mongoOperator/helpers/BackupHelper.py index 33432ed..fc4e796 100644 --- a/mongoOperator/helpers/BackupHelper.py +++ b/mongoOperator/helpers/BackupHelper.py @@ -69,7 +69,7 @@ def backup(self, cluster_object: V1MongoClusterConfiguration, now: datetime): """ backup_file = "/tmp/" + self.BACKUP_FILE_FORMAT.format(namespace=cluster_object.metadata.namespace, name=cluster_object.metadata.name, - date=now.strftime('%Y-%m-%d_%H%M%S')) + date=now.strftime("%Y-%m-%d_%H%M%S")) pod_index = cluster_object.spec.mongodb.replicas - 1 # take last pod hostname = MongoResources.getMemberHostname(pod_index, cluster_object.metadata.name, diff --git a/mongoOperator/helpers/KubernetesResources.py b/mongoOperator/helpers/KubernetesResources.py index 066297c..9d25031 100644 --- a/mongoOperator/helpers/KubernetesResources.py +++ b/mongoOperator/helpers/KubernetesResources.py @@ -11,7 +11,7 @@ class KubernetesResources: """ Helper class responsible for creating the Kubernetes model objects. """ - + # These are fixed values. They need to be these exact values for Mongo to work properly with the operator. MONGO_IMAGE = "mongo:3.6.4" MONGO_NAME = "mongodb" @@ -28,7 +28,7 @@ class KubernetesResources: DEFAULT_STORAGE_SIZE = "30Gi" DEFAULT_STORAGE_MOUNT_PATH = "/data/db" DEFAULT_STORAGE_CLASS_NAME = None # when None is passed the value is simply ignored by Kubernetes - + # Default resource allocation. # See https://docs.mongodb.com/manual/administration/production-notes/#allocate-sufficient-ram-and-cpu. DEFAULT_CPU_LIMIT = "1" diff --git a/mongoOperator/helpers/MongoResources.py b/mongoOperator/helpers/MongoResources.py index 93694c7..82998e8 100644 --- a/mongoOperator/helpers/MongoResources.py +++ b/mongoOperator/helpers/MongoResources.py @@ -21,7 +21,8 @@ def getMemberHostname(cls, pod_index, cluster_name, namespace) -> str: :param namespace: The namespace of the cluster. :return: The name of the host. 
""" - return "{}-{}.{}.{}.svc.cluster.local".format(cluster_name, pod_index, cluster_name, namespace) + return "{cluster_name}-{}.{cluster_name}.{}.svc.cluster.local".format(pod_index, namespace, + cluster_name=cluster_name) @classmethod def getMemberHostnames(cls, cluster_object: V1MongoClusterConfiguration) -> List[str]: diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index e82dd8b..9866523 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -91,7 +91,7 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: """ if cluster_object.spec.backups.gcs.restore_from is not None: backup_file = cluster_object.spec.backups.gcs.restore_from - if backup_file == 'latest': + if backup_file == "latest": backup_file = self.getLastBackup(cluster_object) logging.info("Attempting to restore file %s to cluster %s @ ns/%s.", backup_file, @@ -120,15 +120,15 @@ def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) for _ in range(self.RESTORE_RETRIES): # Wait for the replica set to become ready try: - logging.info("Running mongorestore --host %s --gzip --archive=%s", ','.join(hostnames), downloaded_file) - restore_output = check_output(["mongorestore", "--host", ','.join(hostnames), "--gzip", + logging.info("Running mongorestore --host %s --gzip --archive=%s", ",".join(hostnames), downloaded_file) + restore_output = check_output(["mongorestore", "--host", ",".join(hostnames), "--gzip", "--archive=" + downloaded_file]) logging.info("Restore output: %s", restore_output) os.remove(downloaded_file) return True except CalledProcessError as err: - logging.error("Could not restore '{}', attempt {}. Return code: {} stderr: '{}' stdout: '{}'" - .format(backup_file, _, err.returncode, err.stderr, err.stdout)) + logging.error("Could not restore '%s', attempt %d. Return code: %s stderr: '%s' stdout: '%s'", + backup_file, _, err.returncode, err.stderr, err.stdout) sleep(self.RESTORE_WAIT) raise SubprocessError("Could not restore '{}' after {} retries!".format(backup_file, self.RESTORE_RETRIES)) @@ -140,11 +140,10 @@ def _downloadBackup(self, cluster_object: V1MongoClusterConfiguration, backup_fi :return: The location of the downloaded file. """ prefix = cluster_object.spec.backups.gcs.prefix or self.DEFAULT_BACKUP_PREFIX + restore_bucket = cluster_object.spec.backups.gcs.restore_bucket or cluster_object.spec.backups.gcs.bucket return self._downloadFile( credentials=self._getCredentials(cluster_object), - bucket_name=cluster_object.spec.backups.gcs.restore_bucket \ - if cluster_object.spec.backups.gcs.restore_bucket \ - else cluster_object.spec.backups.gcs.bucket, + bucket_name=restore_bucket, key="{}/{}".format(prefix, backup_file), file_name="/tmp/" + backup_file ) diff --git a/mongoOperator/helpers/listeners/mongo/CommandLogger.py b/mongoOperator/helpers/listeners/mongo/CommandLogger.py index 25e2ba1..d070ca8 100644 --- a/mongoOperator/helpers/listeners/mongo/CommandLogger.py +++ b/mongoOperator/helpers/listeners/mongo/CommandLogger.py @@ -14,21 +14,21 @@ def started(self, event: CommandStartedEvent) -> None: When a command was started. :param event: The event. 
""" - logging.debug("Command {0.command_name} with request id {0.request_id} started on server {0.connection_id}" - .format(event)) + logging.debug("Command %s with request id %s started on server %s", + event.command_name, event.request_id, event.connection_id) def succeeded(self, event: CommandSucceededEvent) -> None: """ When a command succeeded. :param event: The event. """ - logging.debug("Command {0.command_name} with request id {0.request_id} on server {0.connection_id} succeeded " - "in {0.duration_micros} microseconds".format(event)) + logging.debug("Command %s with request id %s on server %s succeeded in %s microseconds", + event.command_name, event.request_id, event.connection_id, event.duration_micros) def failed(self, event: CommandFailedEvent) -> None: """ When a command failed. :param event: The event. """ - logging.debug("Command {0.command_name} with request id {0.request_id} on server {0.connection_id} failed in " - "{0.duration_micros} microseconds".format(event)) + logging.debug("Command %s with request id %s on server %s failed in %s microseconds", + event.command_name, event.request_id, event.connection_id, event.duration_micros) diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py index 7b206a0..c20a16a 100644 --- a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py +++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py @@ -12,9 +12,9 @@ class HeartbeatListener(ServerHeartbeatListener): """ A listener for Mongo server heartbeats. """ - + INVALID_REPLICA_SET_CONFIG = "Does not have a valid replica set config" - + def __init__(self, cluster_object: V1MongoClusterConfiguration, all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: self._cluster_object: V1MongoClusterConfiguration = cluster_object @@ -28,7 +28,7 @@ def started(self, event: ServerHeartbeatStartedEvent) -> None: When the heartbeat was sent. :param event: The event. """ - logging.debug("Heartbeat sent to server {0.connection_id}".format(event)) + logging.debug("Heartbeat sent to server %s", event.connection_id) self._hosts[event.connection_id] = 0 def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: @@ -37,9 +37,9 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: :param event: The event. """ # The reply.document attribute was added in PyMongo 3.4. - logging.debug("Heartbeat to server {0.connection_id} succeeded with reply {0.reply.document}".format(event)) + logging.debug("Heartbeat to server %s succeeded with reply %s", event.connection_id, event.reply.document) self._hosts[event.connection_id] = 1 - + if self._callback_executed: # The callback was already executed so we don't have to again. logging.debug("The callback was already executed") @@ -48,14 +48,13 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: host_count_found = len(list(filter(lambda x: self._hosts[x] == 1, self._hosts))) if self._expected_host_count != host_count_found: # The amount of returned hosts was different than expected. 
- logging.debug("The host count did not match the expected host count: {} found, {} expected".format( - host_count_found, self._expected_host_count - )) + logging.debug("The host count did not match the expected host count: %s found, %s expected", + host_count_found, self._expected_host_count) return if "info" in event.reply.document and event.reply.document["info"] == self.INVALID_REPLICA_SET_CONFIG: # The reply indicated that the replica set config was not correct. - logging.debug("The replica set config was not correct: {}".format(repr(event.reply))) + logging.debug("The replica set config was not correct: %s", event.reply) return self._all_hosts_ready_callback(self._cluster_object) @@ -66,5 +65,6 @@ def failed(self, event: ServerHeartbeatFailedEvent) -> None: When the heartbeat did not arrive. :param event: The event. """ - logging.warning("Heartbeat to server {0.connection_id} failed with error {0.reply}".format(event)) + logging.warning("Heartbeat to server %s failed with error %s", + event.connection_id, event.reply) self._hosts[event.connection_id] = -1 diff --git a/mongoOperator/helpers/listeners/mongo/ServerLogger.py b/mongoOperator/helpers/listeners/mongo/ServerLogger.py index 3a134ff..0e52a3d 100644 --- a/mongoOperator/helpers/listeners/mongo/ServerLogger.py +++ b/mongoOperator/helpers/listeners/mongo/ServerLogger.py @@ -14,7 +14,7 @@ def opened(self, event: ServerOpeningEvent) -> None: When the server was added to the network. :param event: The event. """ - logging.debug("Server {0.server_address} added to topology {0.topology_id}".format(event)) + logging.debug("Server %s added to topology %s", event.server_address, event.topology_id) def description_changed(self, event: ServerDescriptionChangedEvent) -> None: """ @@ -25,13 +25,13 @@ def description_changed(self, event: ServerDescriptionChangedEvent) -> None: new_server_type = event.new_description.server_type if new_server_type != previous_server_type: # server_type_name was added in PyMongo 3.4 - logging.debug( - "Server {0.server_address} changed type from {0.previous_description.server_type_name} to " - "{0.new_description.server_type_name}".format(event)) + logging.debug("Server %s changed type from %s to %s", event.server_address, + event.previous_description.server_type_name, event.new_description.server_type_name) def closed(self, event: ServerClosedEvent) -> None: """ When the server was removed from the network. :param event: The event. """ - logging.debug("Server {0.server_address} removed from topology {0.topology_id}".format(event)) + logging.debug("Server %s removed from topology %s", + event.server_address, event.topology_id) diff --git a/mongoOperator/helpers/listeners/mongo/TopologyListener.py b/mongoOperator/helpers/listeners/mongo/TopologyListener.py index c5a88ad..dfe28a1 100644 --- a/mongoOperator/helpers/listeners/mongo/TopologyListener.py +++ b/mongoOperator/helpers/listeners/mongo/TopologyListener.py @@ -23,21 +23,22 @@ def opened(self, event: TopologyOpenedEvent) -> None: When a topology opened. :param event: The event. """ - logging.debug("Topology with id {0.topology_id} opened".format(event)) + logging.debug("Topology with id %s opened", event.topology_id) def description_changed(self, event: TopologyDescriptionChangedEvent) -> None: """ When the description of a topology changed. :param event: The event. 
""" - logging.debug("Topology description updated for topology id {0.topology_id}".format(event)) + logging.debug("Topology description updated for topology id %s", event.topology_id) previous_topology_type = event.previous_description.topology_type new_topology_type = event.new_description.topology_type if new_topology_type != previous_topology_type: # topology_type_name was added in PyMongo 3.4 - logging.debug("Topology {0.topology_id} changed type from {0.previous_description.topology_type_name} to " - "{0.new_description.topology_type_name}".format(event)) + logging.debug("Topology %s changed type from %s to %s", event.topology_id, + event.previous_description.topology_type_name, + event.new_description.topology_type_name) # The has_writable_server and has_readable_server methods were added in PyMongo 3.4. if not event.new_description.has_writable_server(): @@ -56,5 +57,4 @@ def closed(self, event: TopologyClosedEvent) -> None: When topology was closed. :param event: The event. """ - logging.debug("Topology with id {0.topology_id} closed".format(event)) - + logging.debug("Topology with id %s closed", event.topology_id) diff --git a/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py b/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py index c97f6c0..d5b3332 100644 --- a/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py +++ b/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py @@ -17,8 +17,6 @@ class AdminSecretChecker(BaseResourceChecker): The inherited methods do not have documentation, see the parent class for more details. """ - T = V1Secret - # Name of the secret for each cluster. NAME_FORMAT = "{}-admin-credentials" @@ -35,18 +33,18 @@ def _generateSecretData() -> Dict[str, str]: """Generates a root user with a random secure password to use in secrets.""" return {"username": "root", "password": b64encode(os.urandom(33)).decode()} - def listResources(self) -> List[T]: + def listResources(self) -> List[V1Secret]: return self.kubernetes_service.listAllSecretsWithLabels().items - def getResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def getResource(self, cluster_object: V1MongoClusterConfiguration) -> V1Secret: name = self.getSecretName(cluster_object.metadata.name) return self.kubernetes_service.getSecret(name, cluster_object.metadata.namespace) - def createResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def createResource(self, cluster_object: V1MongoClusterConfiguration) -> V1Secret: name = self.getSecretName(cluster_object.metadata.name) return self.kubernetes_service.createSecret(name, cluster_object.metadata.namespace, self._generateSecretData()) - def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> V1Secret: name = self.getSecretName(cluster_object.metadata.name) return self.kubernetes_service.updateSecret(name, cluster_object.metadata.namespace, self._generateSecretData()) diff --git a/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py b/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py index 72aad16..4bf706d 100644 --- a/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py +++ b/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py @@ -11,6 +11,8 @@ from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService +GenericType = TypeVar("GenericType") + class 
BaseResourceChecker: """ @@ -18,7 +20,6 @@ class BaseResourceChecker: """ # this is the resource type, e.g. V1Service or V1StatefulSet. - T = TypeVar("T") def __init__(self, kubernetes_service: KubernetesService): self.kubernetes_service = kubernetes_service @@ -32,7 +33,7 @@ def getClusterName(resource_name: str) -> str: """ return resource_name - def checkResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def checkResource(self, cluster_object: V1MongoClusterConfiguration) -> GenericType: """ Checks whether the resource is up-to-date in Kubernetes, creating or updating it if necessary. :param cluster_object: The cluster object from the YAML file. @@ -40,9 +41,9 @@ def checkResource(self, cluster_object: V1MongoClusterConfiguration) -> T: """ try: resource = self.getResource(cluster_object) - except ApiException as e: + except ApiException as api_exception: resource = None - if e.status != 404: + if api_exception.status != 404: raise if resource: @@ -68,15 +69,15 @@ def cleanResources(self) -> None: try: self.kubernetes_service.getMongoObject(cluster_name, namespace) continue - except ApiException as e: - if e.status != 404: + except ApiException as api_exception: + if api_exception.status != 404: raise # The service exists but the Mongo object it belonged to does not, we have to delete it. self.deleteResource(cluster_name, namespace) @abstractmethod - def listResources(self) -> List[T]: + def listResources(self) -> List[GenericType]: """ Retrieves a list of resource objects. :return: The list of available resources. @@ -84,7 +85,7 @@ def listResources(self) -> List[T]: raise NotImplementedError @abstractmethod - def getResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def getResource(self, cluster_object: V1MongoClusterConfiguration) -> GenericType: """ Retrieves the resource for the given cluster. :param cluster_object: The cluster object from the YAML file. @@ -94,7 +95,7 @@ def getResource(self, cluster_object: V1MongoClusterConfiguration) -> T: raise NotImplementedError @abstractmethod - def createResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def createResource(self, cluster_object: V1MongoClusterConfiguration) -> GenericType: """ Creates a new resource instance. :param cluster_object: The cluster object from the YAML file. @@ -103,7 +104,7 @@ def createResource(self, cluster_object: V1MongoClusterConfiguration) -> T: raise NotImplementedError @abstractmethod - def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> GenericType: """ Updates the given resource instance. :param cluster_object: The cluster object from the YAML file. diff --git a/mongoOperator/helpers/resourceCheckers/ServiceChecker.py b/mongoOperator/helpers/resourceCheckers/ServiceChecker.py index 7afe704..26ebf7c 100644 --- a/mongoOperator/helpers/resourceCheckers/ServiceChecker.py +++ b/mongoOperator/helpers/resourceCheckers/ServiceChecker.py @@ -15,18 +15,16 @@ class ServiceChecker(BaseResourceChecker): The inherited methods do not have documentation, see the parent class for more details. 
""" - T = V1Service - - def listResources(self) -> List[T]: + def listResources(self) -> List[V1Service]: return self.kubernetes_service.listAllServicesWithLabels().items - def getResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def getResource(self, cluster_object: V1MongoClusterConfiguration) -> V1Service: return self.kubernetes_service.getService(cluster_object.metadata.name, cluster_object.metadata.namespace) - def createResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def createResource(self, cluster_object: V1MongoClusterConfiguration) -> V1Service: return self.kubernetes_service.createService(cluster_object) - def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> V1Service: return self.kubernetes_service.updateService(cluster_object) def deleteResource(self, cluster_name: str, namespace: str) -> V1Status: diff --git a/mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py b/mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py index cbf93c8..0fd69d9 100644 --- a/mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py +++ b/mongoOperator/helpers/resourceCheckers/StatefulSetChecker.py @@ -15,18 +15,16 @@ class StatefulSetChecker(BaseResourceChecker): The inherited methods do not have documentation, see the parent class for more details. """ - T = V1StatefulSet - - def listResources(self) -> List[T]: + def listResources(self) -> List[V1StatefulSet]: return self.kubernetes_service.listAllStatefulSetsWithLabels().items - def getResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def getResource(self, cluster_object: V1MongoClusterConfiguration) -> V1StatefulSet: return self.kubernetes_service.getStatefulSet(cluster_object.metadata.name, cluster_object.metadata.namespace) - def createResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def createResource(self, cluster_object: V1MongoClusterConfiguration) -> V1StatefulSet: return self.kubernetes_service.createStatefulSet(cluster_object) - def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> T: + def updateResource(self, cluster_object: V1MongoClusterConfiguration) -> V1StatefulSet: return self.kubernetes_service.updateStatefulSet(cluster_object) def deleteResource(self, cluster_name: str, namespace: str) -> V1Status: diff --git a/mongoOperator/models/BaseModel.py b/mongoOperator/models/BaseModel.py index ffec79a..71877e8 100644 --- a/mongoOperator/models/BaseModel.py +++ b/mongoOperator/models/BaseModel.py @@ -55,7 +55,7 @@ def __eq__(self, other: any) -> bool: :param other: The other object. :return: True if they are equal, False otherwise. 
""" - return type(other) == type(self) and other.to_dict() == self.to_dict() + return isinstance(other, type(self)) and other.to_dict() == self.to_dict() def __getitem__(self, attr: str) -> any: """ @@ -74,5 +74,5 @@ def __repr__(self) -> str: """ return "{}({})".format( self.__class__.__name__, - ", ".join('{}={}'.format(attr, value) for attr, value in self.to_dict(skip_validation=True).items()) + ", ".join("{}={}".format(attr, value) for attr, value in self.to_dict(skip_validation=True).items()) ) diff --git a/mongoOperator/models/V1MongoClusterConfigurationSpecMongoDB.py b/mongoOperator/models/V1MongoClusterConfigurationSpecMongoDB.py index 008cc3c..46d5258 100644 --- a/mongoOperator/models/V1MongoClusterConfigurationSpecMongoDB.py +++ b/mongoOperator/models/V1MongoClusterConfigurationSpecMongoDB.py @@ -9,22 +9,22 @@ class V1MongoClusterConfigurationSpecMongoDB(BaseModel): """ Model for the `spec.mongodb` field of the V1MongoClusterConfiguration. """ - + # The name of the deployment. mongo_name = StringField(required=False) - + # The name of the volumes that Kubernetes will create and mount. Defaults to mongo-storage. storage_name = StringField(required=False) - + # The size of the volumes that Kubernetes will create and mount. Defaults to 30Gi. storage_size = StringField(required=False) - + # The path on which the volumes should be mounted. Defaults to /data/db. storage_data_path = StringField(required=False) - + # The Kubernetes storage class to use in Kubernetes. Defaults to None. storage_class_name = StringField(required=False) - + # Kubernetes CPU limit of each Mongo container. Defaults to 1 (vCPU). cpu_limit = StringField(required=False) diff --git a/mongoOperator/models/fields.py b/mongoOperator/models/fields.py index 392f1d7..fedc149 100644 --- a/mongoOperator/models/fields.py +++ b/mongoOperator/models/fields.py @@ -109,6 +109,6 @@ class MongoReplicaCountField(Field): amount of MongoDB replicas. It raises a `ValueError` if the validation fails. """ def parse(self, value: int): - if not isinstance(value, int) or not (3 <= value <= 50): + if not isinstance(value, int) or not 3 <= value <= 50: raise ValueError("The amount of replica sets must be between 3 and 50 (got {}).".format(repr(value))) return super().parse(value) diff --git a/mongoOperator/services/KubernetesService.py b/mongoOperator/services/KubernetesService.py index d389de2..0207c4e 100644 --- a/mongoOperator/services/KubernetesService.py +++ b/mongoOperator/services/KubernetesService.py @@ -4,15 +4,15 @@ import logging from time import sleep from unittest.mock import patch +import yaml -from kubernetes.client.rest import ApiException from typing import Dict, Optional -import yaml from kubernetes.config import load_incluster_config from kubernetes import client from kubernetes.client import Configuration, V1DeleteOptions, V1ServiceList, V1StatefulSetList, V1SecretList, \ V1beta1CustomResourceDefinition +from kubernetes.client.rest import ApiException from Settings import Settings from mongoOperator.helpers.IgnoreIfExists import IgnoreIfExists @@ -55,13 +55,13 @@ def createMongoObjectDefinition(self) -> V1beta1CustomResourceDefinition: # Create it if our CRD doesn't exists yet. 
logging.info("Custom resource definition %s not found in cluster (available: %s), creating it...", Settings.CUSTOM_OBJECT_RESOURCE_PLURAL, available_resources) - with open("mongo_crd.yaml") as f: - definition_dict = yaml.load(f) + with open("mongo_crd.yaml") as custom_resource_file: + definition_dict = yaml.load(custom_resource_file) body = KubernetesResources.deserialize(definition_dict, "V1beta1CustomResourceDefinition") # issue with kubernetes causes status.condition==null, which raises an exception and breaks the connection. # by ignoring the validation of this field in the client, we can keep the connection open. - with patch("kubernetes.client.models.v1beta1_custom_resource_definition_status.V1beta1CustomResourceDefinitionStatus.conditions"): + with patch("kubernetes.client.models.v1beta1_custom_resource_definition_status.V1beta1CustomResourceDefinitionStatus.conditions"): # noqa: E501 pylint: disable=C0301 return self.extensions_api.create_custom_resource_definition(body) def listMongoObjects(self, **kwargs) -> Dict[str, any]: @@ -79,11 +79,11 @@ def listMongoObjects(self, **kwargs) -> Dict[str, any]: Settings.CUSTOM_OBJECT_API_VERSION, Settings.CUSTOM_OBJECT_RESOURCE_PLURAL, **kwargs) - except ApiException as e: - if e.status != 404: + except ApiException as api_exception: + if api_exception.status != 404: raise logging.info("Could not list the custom Mongo objects: %s. The definition is probably being " - "initialized, we wait %s seconds.", e.reason, self.LIST_CUSTOM_OBJECTS_WAIT) + "initialized, we wait %s seconds.", api_exception.reason, self.LIST_CUSTOM_OBJECTS_WAIT) sleep(self.LIST_CUSTOM_OBJECTS_WAIT) raise TimeoutError("Could not list the custom mongo objects after {} retries" @@ -102,21 +102,21 @@ def getMongoObject(self, name: str, namespace: str) -> V1MongoClusterConfigurati Settings.CUSTOM_OBJECT_RESOURCE_PLURAL, name) - def listAllServicesWithLabels(self, labels: Dict[str, str] = DEFAULT_LABELS) -> V1ServiceList: + def listAllServicesWithLabels(self, labels: Dict[str, str] = None) -> V1ServiceList: """Get all services with the given labels.""" - label_selector = KubernetesResources.createLabelSelector(labels) + label_selector = KubernetesResources.createLabelSelector(labels or self.DEFAULT_LABELS) logging.debug("Getting all services with labels %s", label_selector) return self.core_api.list_service_for_all_namespaces(label_selector=label_selector) - def listAllStatefulSetsWithLabels(self, labels: Dict[str, str] = DEFAULT_LABELS) -> V1StatefulSetList: + def listAllStatefulSetsWithLabels(self, labels: Dict[str, str] = None) -> V1StatefulSetList: """Get all stateful sets with the given labels.""" - label_selector = KubernetesResources.createLabelSelector(labels) + label_selector = KubernetesResources.createLabelSelector(labels or self.DEFAULT_LABELS) logging.debug("Getting all stateful sets with labels %s", label_selector) return self.apps_api.list_stateful_set_for_all_namespaces(label_selector=label_selector) - def listAllSecretsWithLabels(self, labels: Dict[str, str] = DEFAULT_LABELS) -> V1SecretList: + def listAllSecretsWithLabels(self, labels: Dict[str, str] = None) -> V1SecretList: """Get al secrets with the given labels.""" - label_selector = KubernetesResources.createLabelSelector(labels) + label_selector = KubernetesResources.createLabelSelector(labels or self.DEFAULT_LABELS) logging.debug("Getting all secrets with labels %s", label_selector) return self.core_api.list_secret_for_all_namespaces(label_selector=label_selector) @@ -214,6 +214,7 @@ def 
deleteService(self, name: str, namespace: str) -> client.V1Status: return self.core_api.delete_namespaced_service(name, namespace, body) except TypeError: # bug in kubernetes client 5.0.0 - body parameter was missing. + # pylint: disable=E1120 return self.core_api.delete_namespaced_service(name, namespace) def getStatefulSet(self, name: str, namespace: str) -> client.V1beta1StatefulSet: diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 4f9ac1b..96ef9c6 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -21,7 +21,7 @@ class MongoService: """ Bundled methods for interacting with MongoDB. """ - + # name of the container CONTAINER = "mongodb" NO_REPLICA_SET_RESPONSE = "no replset config has been received" @@ -53,11 +53,11 @@ def checkOrCreateReplicaSet(self, cluster_object: V1MongoClusterConfiguration) - try: create_status_response = self._executeAdminCommand(cluster_object, create_status_command) logging.debug("Checking replicas, received %s", repr(create_status_response)) - + # The replica set could not be checked if create_status_response["ok"] != 1: raise ValueError("Unexpected response trying to check replicas: '{}'".format( - repr(create_status_response))) + repr(create_status_response))) logging.info("The replica set %s @ ns/%s seems to be working properly with %s/%s pods.", cluster_name, namespace, len(create_status_response["members"]), replicas) @@ -123,19 +123,19 @@ def _initializeReplicaSet(cluster_object: V1MongoClusterConfiguration) -> None: """ cluster_name = cluster_object.metadata.name namespace = cluster_object.metadata.namespace - + master_connection = MongoClient(MongoResources.getMemberHostname(0, cluster_name, namespace)) create_replica_command, create_replica_args = MongoResources.createReplicaInitiateCommand(cluster_object) create_replica_response = master_connection.admin.command(create_replica_command, create_replica_args) - + if create_replica_response["ok"] == 1: logging.info("Initialized replica set %s @ ns/%s", cluster_name, namespace) return - + logging.error("Initializing replica set failed, received %s", repr(create_replica_response)) raise ValueError("Unexpected response initializing replica set {} @ ns/{}:\n{}" .format(cluster_name, namespace, create_replica_response)) - + def _createMongoClientForReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> MongoClient: """ Creates a new MongoClient instance for a replica set. 
diff --git a/tests/TestMongoOperator.py b/tests/TestMongoOperator.py index 8a35bc9..becb076 100644 --- a/tests/TestMongoOperator.py +++ b/tests/TestMongoOperator.py @@ -14,11 +14,29 @@ class TestMongoOperator(TestCase): @patch("mongoOperator.MongoOperator.sleep") @patch("mongoOperator.MongoOperator.ClusterChecker") def test_run(self, checker_mock, sleep_mock): + checker_mock.return_value.collectGarbage.side_effect = None, Exception() # break the 2nd run + + operator = MongoOperator(sleep_per_run=0.01) + with self.assertRaises(Exception): + operator.run_forever() + + expected_calls = [ + call(), + call().checkExistingClusters(), call().collectGarbage(), + call().checkExistingClusters(), call().collectGarbage(), + ] + self.assertEqual(expected_calls, checker_mock.mock_calls) + self.assertEqual([call(0.01)], sleep_mock.mock_calls) + + @patch("mongoOperator.MongoOperator.sleep") + @patch("mongoOperator.MongoOperator.ClusterChecker") + def test_run_with_interrupt(self, checker_mock, sleep_mock): sleep_mock.side_effect = None, KeyboardInterrupt # we force stop on the 2nd run - checker_mock.return_value.collectGarbage.side_effect = Exception(), None # break the 1st run + checker_mock.return_value.collectGarbage.side_effect = None, None operator = MongoOperator(sleep_per_run=0.01) operator.run_forever() + expected_calls = [ call(), call().checkExistingClusters(), call().collectGarbage(),
diff --git a/tests/helpers/TestBackupChecker.py b/tests/helpers/TestBackupChecker.py index e341f3c..5e05f6d 100644 --- a/tests/helpers/TestBackupChecker.py +++ b/tests/helpers/TestBackupChecker.py @@ -39,7 +39,7 @@ def test_backupIfNeeded_check_if_needed(self, backup_mock): # this backup is executed every hour at 0 minutes. self.assertEqual("0 * * * *", self.cluster_object.spec.backups.cron) - key = ('mongo-cluster', self.cluster_object.metadata.namespace) + key = ("mongo-cluster", self.cluster_object.metadata.namespace) expected_calls = [] current_date = datetime(2018, 2, 28, 12, 30, 0)
@@ -87,27 +87,27 @@ def test_backup(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): self.checker.backup(self.cluster_object, current_date) - self.assertEqual([call.getSecret('storage-serviceaccount', 'mongo-operator-cluster')], + self.assertEqual([call.getSecret("storage-serviceaccount", "mongo-operator-cluster")], self.kubernetes_service.mock_calls) subprocess_mock.assert_called_once_with([ - 'mongodump', '--host', 'mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local', '--gzip', - '--archive=/tmp/' + expected_backup_name + "mongodump", "--host", "mongo-cluster-2.mongo-cluster.mongo-operator-cluster.svc.cluster.local", "--gzip", + "--archive=/tmp/" + expected_backup_name ]) - expected_service_call = call.from_service_account_info({'user': 'password'}) + expected_service_call = call.from_service_account_info({"user": "password"}) self.assertEqual([expected_service_call], gcs_service_mock.mock_calls) expected_storage_calls = [ call(gcs_service_mock.from_service_account_info.return_value.project_id, gcs_service_mock.from_service_account_info.return_value), - call().bucket('ultimaker-mongo-backups'), - call().bucket().blob('test-backups/' + expected_backup_name), - call().bucket().blob().upload_from_filename('/tmp/' + expected_backup_name), + call().bucket("ultimaker-mongo-backups"), + call().bucket().blob("test-backups/" + expected_backup_name), + call().bucket().blob().upload_from_filename("/tmp/" + expected_backup_name), ] self.assertEqual(expected_storage_calls, storage_mock.mock_calls) - 
expected_os_call = call.remove('/tmp/' + expected_backup_name) + expected_os_call = call.remove("/tmp/" + expected_backup_name) self.assertEqual([expected_os_call], os_mock.mock_calls) @patch("mongoOperator.helpers.BackupHelper.check_output") diff --git a/tests/helpers/TestBaseResourceChecker.py b/tests/helpers/TestBaseResourceChecker.py index 6272162..5856066 100644 --- a/tests/helpers/TestBaseResourceChecker.py +++ b/tests/helpers/TestBaseResourceChecker.py @@ -66,16 +66,16 @@ def test_cleanResources_not_found(self): self.checker.listResources = MagicMock(return_value=[self.cluster_object]) self.checker.deleteResource = MagicMock() self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', "mongo-operator-cluster")], + self.assertEqual([call.getMongoObject("mongo-cluster", "mongo-operator-cluster")], self.kubernetes_service.mock_calls) - self.checker.deleteResource.assert_called_once_with('mongo-cluster', "mongo-operator-cluster") + self.checker.deleteResource.assert_called_once_with("mongo-cluster", "mongo-operator-cluster") def test_cleanResources_error(self): self.kubernetes_service.getMongoObject.side_effect = ApiException(400) self.checker.listResources = MagicMock(return_value=[self.cluster_object]) with self.assertRaises(ApiException): self.checker.cleanResources() - self.assertEqual([call.getMongoObject('mongo-cluster', "mongo-operator-cluster")], + self.assertEqual([call.getMongoObject("mongo-cluster", "mongo-operator-cluster")], self.kubernetes_service.mock_calls) def test_listResources(self): diff --git a/tests/helpers/TestCommandLogger.py b/tests/helpers/TestCommandLogger.py index b379fd1..6183a5d 100644 --- a/tests/helpers/TestCommandLogger.py +++ b/tests/helpers/TestCommandLogger.py @@ -15,16 +15,16 @@ class CommandEventMock: request_id = 1 connection_id = 1 duration_micros = 10000 - + class TestCommandLogger(TestCase): command_logger = CommandLogger() def test_started(self): self.command_logger.started(event = cast(CommandStartedEvent, CommandEventMock())) - + def test_succeeded(self): self.command_logger.succeeded(event = cast(CommandSucceededEvent, CommandEventMock())) - + def test_failed(self): self.command_logger.failed(event = cast(CommandFailedEvent, CommandEventMock())) diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py index d28349b..6c45617 100644 --- a/tests/helpers/TestRestoreHelper.py +++ b/tests/helpers/TestRestoreHelper.py @@ -79,7 +79,7 @@ def test_restore(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock) self.restore_helper.restore(self.cluster_object, expected_backup_name) - self.assertEqual([call.getSecret("storage-serviceaccount", "mongo-operator-cluster")], + self.assertEqual([call.getSecret("storage-serviceaccount", "mongo-operator-cluster")], self.kubernetes_service.mock_calls) subprocess_mock.assert_called_once_with([ @@ -120,9 +120,9 @@ def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mo @patch("mongoOperator.helpers.RestoreHelper.check_output") def test_restore_gcs_bad_credentials(self, subprocess_mock): expected_backup_name = "mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz" - + with self.assertRaises(ValueError) as context: self.restore_helper.restore(self.cluster_object, expected_backup_name) - + self.assertIn("Service account info was not in the expected format", str(context.exception)) self.assertEqual(0, subprocess_mock.call_count) diff --git a/tests/helpers/TestServerLogger.py b/tests/helpers/TestServerLogger.py 
index e7d4e97..ebcc8fc 100644 --- a/tests/helpers/TestServerLogger.py +++ b/tests/helpers/TestServerLogger.py @@ -12,7 +12,7 @@ class ServerDescriptionEventMock: server_type = "foo" server_type_name = "foo" - + class ServerEventMock: """ Mock implementation of a ServerEvent. """ @@ -24,13 +24,13 @@ class ServerEventMock: class TestServerLogger(TestCase): server_logger = ServerLogger() - + def test_opened(self): self.server_logger.opened(event=cast(ServerOpeningEvent, ServerEventMock())) - + def test_closed(self): self.server_logger.closed(event=cast(ServerClosedEvent, ServerEventMock())) - + def test_description_changed(self): serverEventMock = ServerEventMock() serverEventMock.new_description.server_type = "bar" diff --git a/tests/helpers/TestTopologyListener.py b/tests/helpers/TestTopologyListener.py index 0d7df96..d5d3e5a 100644 --- a/tests/helpers/TestTopologyListener.py +++ b/tests/helpers/TestTopologyListener.py @@ -51,5 +51,3 @@ def test_closed(self): topology_logger = TopologyListener(self.cluster_object, replica_set_ready_callback=self._onReplicaSetReadyCallback) topology_logger.closed(event=cast(TopologyClosedEvent, Mock(spec=TopologyClosedEvent))) - - diff --git a/tests/models/TestV1MongoClusterConfiguration.py b/tests/models/TestV1MongoClusterConfiguration.py index 2e1dd43..5ef1305 100644 --- a/tests/models/TestV1MongoClusterConfiguration.py +++ b/tests/models/TestV1MongoClusterConfiguration.py @@ -50,7 +50,7 @@ def test_storage_class_name(self): self.cluster_object.spec.mongodb.storage_class_name = "fast" self.assertEqual(self.cluster_object.to_dict(skip_validation = True), V1MongoClusterConfiguration(**self.cluster_dict).to_dict(skip_validation = True)) - + def test_secret_key_ref(self): service_account = self.cluster_object.spec.backups.gcs.service_account expected = V1ServiceAccountRef(secret_key_ref=V1SecretKeySelector(name="storage-serviceaccount", key="json")) diff --git a/tests/services/TestKubernetesService.py b/tests/services/TestKubernetesService.py index f1dd991..9adf410 100644 --- a/tests/services/TestKubernetesService.py +++ b/tests/services/TestKubernetesService.py @@ -34,7 +34,7 @@ def setUp(self): self.cpu_limit = "100m" self.memory_limit = "64Mi" self.stateful_set = self._createStatefulSet() - + def _createStatefulSet(self) -> V1beta1StatefulSet: return V1beta1StatefulSet( metadata=self._createMeta(self.name), @@ -81,7 +81,7 @@ def _createMeta(self, name: str) -> V1ObjectMeta: name=name, namespace=self.namespace, ) - + def _createResourceLimits(self) -> V1ResourceRequirements: return V1ResourceRequirements( limits={"cpu": self.cpu_limit, "memory": self.memory_limit}, @@ -158,7 +158,7 @@ def test_listMongoObjects(self, client_mock): result = service.listMongoObjects(param="value") expected_calls = [ call.ApiextensionsV1beta1Api().list_custom_resource_definition(), - call.CustomObjectsApi().list_cluster_custom_object('operators.ultimaker.com', 'v1', "mongos", param='value') + call.CustomObjectsApi().list_cluster_custom_object("operators.ultimaker.com", "v1", "mongos", param="value") ] self.assertEqual(expected_calls, client_mock.mock_calls) self.assertEqual(client_mock.CustomObjectsApi().list_cluster_custom_object.return_value, result) @@ -177,7 +177,7 @@ def test_listMongoObjects_400(self, client_mock): expected_calls = [ call.ApiextensionsV1beta1Api().list_custom_resource_definition(), - call.CustomObjectsApi().list_cluster_custom_object('operators.ultimaker.com', 'v1', "mongos", param='value') + 
call.CustomObjectsApi().list_cluster_custom_object("operators.ultimaker.com", "v1", "mongos", param="value") ] self.assertEqual(expected_calls, client_mock.mock_calls) @@ -195,9 +195,12 @@ def test_listMongoObjects_404(self, client_mock): expected_calls = [ call.ApiextensionsV1beta1Api().list_custom_resource_definition(), - call.CustomObjectsApi().list_cluster_custom_object('operators.ultimaker.com', 'v1', "mongos", param='value'), - call.CustomObjectsApi().list_cluster_custom_object('operators.ultimaker.com', 'v1', "mongos", param='value'), - call.CustomObjectsApi().list_cluster_custom_object('operators.ultimaker.com', 'v1', "mongos", param='value'), + call.CustomObjectsApi().list_cluster_custom_object("operators.ultimaker.com", "v1", "mongos", + param="value"), + call.CustomObjectsApi().list_cluster_custom_object("operators.ultimaker.com", "v1", "mongos", + param="value"), + call.CustomObjectsApi().list_cluster_custom_object("operators.ultimaker.com", "v1", "mongos", + param="value"), ] self.assertEqual(expected_calls, client_mock.mock_calls) self.assertEqual("Could not list the custom mongo objects after 3 retries", str(context.exception)) @@ -208,7 +211,7 @@ def test_getMongoObject(self, client_mock): result = service.getMongoObject(self.name, self.namespace) expected_calls = [call.CustomObjectsApi().get_namespaced_custom_object( - 'operators.ultimaker.com', 'v1', self.namespace, 'mongos', self.name + "operators.ultimaker.com", "v1", self.namespace, "mongos", self.name )] self.assertEqual(expected_calls, client_mock.mock_calls) self.assertEqual(client_mock.CustomObjectsApi().get_namespaced_custom_object.return_value, result) @@ -367,8 +370,8 @@ def test_createService(self, client_mock): metadata=self._createMeta(self.name), spec=V1ServiceSpec( cluster_ip="None", - ports=[V1ServicePort(name='mongod', port=27017, protocol='TCP')], - selector={'heritage': 'mongos', 'name': self.name, 'operated-by': 'operators.ultimaker.com'}, + ports=[V1ServicePort(name="mongod", port=27017, protocol="TCP")], + selector={"heritage": "mongos", "name": self.name, "operated-by": "operators.ultimaker.com"}, ) ) expected_calls = [call.CoreV1Api().create_namespaced_service(self.namespace, expected_body)] @@ -385,8 +388,8 @@ def test_updateService(self, client_mock): metadata=self._createMeta(self.name), spec=V1ServiceSpec( cluster_ip="None", - ports=[V1ServicePort(name='mongod', port=27017, protocol='TCP')], - selector={'heritage': 'mongos', 'name': self.name, 'operated-by': 'operators.ultimaker.com'}, + ports=[V1ServicePort(name="mongod", port=27017, protocol="TCP")], + selector={"heritage": "mongos", "name": self.name, "operated-by": "operators.ultimaker.com"}, ) ) result = service.updateService(self.cluster_object) diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py index b2de40c..ebd5e4a 100644 --- a/tests/services/TestMongoService.py +++ b/tests/services/TestMongoService.py @@ -47,17 +47,17 @@ def setUp(self): "codeName": "NotYetInitialized" } - self.initiate_ok_response = loads(''' - {"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": - {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + self.initiate_ok_response = loads(""" + {"ok": 1.0, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": + {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", "$type": "00"}, "keyId": 
0}}} - ''') + """) - self.initiate_not_found_response = loads(''' - {"ok": 2, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": - {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", + self.initiate_not_found_response = loads(""" + {"ok": 2, "operationTime": {"$timestamp": {"t": 1549963040, "i": 1}}, "$clusterTime": {"clusterTime": + {"$timestamp": {"t": 1549963040, "i": 1}}, "signature": {"hash": {"$binary": "AAAAAAAAAAAAAAAAAAAAAAAAAAA=", "$type": "00"}, "keyId": 0}}} - ''') + """) self.expected_cluster_config = { "_id": "mongo-cluster", @@ -86,8 +86,8 @@ def test_mongoAdminCommand(self, mongo_client_mock): def test__mongoAdminCommand_NodeNotFound(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.side_effect = OperationFailure( - "replSetInitiate quorum check failed because not all proposed set members responded affirmatively:") - + "replSetInitiate quorum check failed because not all proposed set members responded affirmatively:") + with self.assertRaises(OperationFailure) as ex: mongo_command, mongo_args = MongoResources.createReplicaInitiateCommand(self.cluster_object) self.service._executeAdminCommand(self.cluster_object, mongo_command, mongo_args) @@ -110,7 +110,7 @@ def test__mongoAdminCommand_TimeoutError(self, mongo_client_mock): ConnectionFailure("connection attempt failed"), OperationFailure("no replset config has been received") ) - + with self.assertRaises(TimeoutError) as context: self.service._executeAdminCommand(self.cluster_object, "replSetGetStatus") @@ -134,13 +134,12 @@ def test_initializeReplicaSet_ValueError(self, mongo_client_mock): command_result = self._getFixture("initiate-ok") command_result["ok"] = 2 mongo_client_mock.return_value.admin.command.return_value = command_result - + with self.assertRaises(ValueError) as context: self.service._initializeReplicaSet(self.cluster_object) - self.assertEqual("Unexpected response initializing replica set mongo-cluster @ ns/" + - self.cluster_object.metadata.namespace + ":\n" + - str(self.initiate_not_found_response), + self.assertEqual("Unexpected response initializing replica set mongo-cluster @ ns/" + + self.cluster_object.metadata.namespace + ":\n" + str(self.initiate_not_found_response), str(context.exception)) def test_reconfigureReplicaSet(self, mongo_client_mock): @@ -155,9 +154,8 @@ def test_reconfigureReplicaSet_ValueError(self, mongo_client_mock): with self.assertRaises(ValueError) as context: self.service._reconfigureReplicaSet(self.cluster_object) - self.assertEqual("Unexpected response reconfiguring replica set mongo-cluster @ ns/mongo-operator-cluster:\n" + - str(self.initiate_not_found_response), - str(context.exception)) + self.assertEqual("Unexpected response reconfiguring replica set mongo-cluster @ ns/mongo-operator-cluster:\n" + + str(self.initiate_not_found_response), str(context.exception)) def test_checkOrCreateReplicaSet_ok(self, mongo_client_mock): mongo_client_mock.return_value.admin.command.return_value = self._getFixture("replica-status-ok") @@ -191,7 +189,7 @@ def test_checkOrCreateReplicaSet_ValueError(self, mongo_client_mock): def test_checkOrCreateReplicaSet_OperationalFailure(self, mongo_client_mock): bad_value = "BadValue: Unexpected field foo in replica set member configuration for member:" \ - "{ _id: 0, foo: \"localhost:27017\" }" + "{ _id: 0, foo: \"localhost:27017\" }" mongo_client_mock.return_value.admin.command.side_effect = ( OperationFailure(bad_value), 
            OperationFailure(bad_value))

@@ -207,7 +205,7 @@ def test_createUsers_ok(self, mongo_client_mock):
 
     def test_createUsers_ValueError(self, mongo_client_mock):
         mongo_client_mock.return_value.admin.command.side_effect = OperationFailure(
-                "\"createUser\" had the wrong type. Expected string, found object"),
+            "\"createUser\" had the wrong type. Expected string, found object"),
 
         with self.assertRaises(OperationFailure) as context:
             self.service.createUsers(self.cluster_object)
@@ -249,4 +247,3 @@ def test_onAllHostsReady(self, mongo_client_mock):
         self.service._initializeReplicaSet.assert_called()
 
         mongo_client_mock.assert_not_called()
-
diff --git a/tests/test_utils.py b/tests/test_utils.py
index 2d49322..f8cc321 100644
--- a/tests/test_utils.py
+++ b/tests/test_utils.py
@@ -8,10 +8,12 @@ def getExampleClusterDefinition(replicas = 3) -> dict:
     with open("./examples/mongo-{}-replicas.yaml".format(replicas)) as f:
         return yaml.load(f)
 
+
 def getExampleClusterDefinitionWithRestore() -> dict:
     with open("./examples/mongo-3-replicas-from-backup.yaml") as f:
         return yaml.load(f)
 
+
 def dict_eq(one, other):
     # [(k, getattr(self, k), getattr(other, k)) for k in self.__dict__ if getattr(self, k) != getattr(other, k)]
     return other and one.__dict__ == other.__dict__

From 741a0023d278335529bd4ed34694772e1603379e Mon Sep 17 00:00:00 2001
From: Rick van den Hof
Date: Thu, 14 Feb 2019 17:03:34 +0100
Subject: [PATCH 21/36] Update documentation

---
 README.md                  | 3 ++-
 tests/TestMongoOperator.py | 2 +-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/README.md b/README.md
index 69f50aa..2fa95cd 100644
--- a/README.md
+++ b/README.md
@@ -75,7 +75,8 @@ The following options are available to use in the `spec` section of the `yaml` c
 | `mongodb.replicas` | - | The amount of MongoDB replicas that should be available in the replica set. Must be an uneven positive integer and minimum 3. |
 | * `backups.cron` | - | The cron on which to create a backup to cloud storage. |
 | * `backups.gcs.bucket` | - | The GCS bucket to upload the backup to. |
-| `backups.gcs.restore_bucket` | - | The GCS bucket that contains the backup we wish to restore. |
+| `backups.gcs.restore_bucket` | - | The GCS bucket that contains the backup we wish to restore. If not specified, the value of backups.gcs.bucket is used. |
+| `backups.gcs.restore_from` | - | Filename of the backup in the bucket we wish to restore. If set to 'latest', the last backup created is used; if not specified, no restore is performed. |
 | `backups.gcs.prefix` | backups/ | The file name prefix for the backup file. |
 
 > Please read https://docs.mongodb.com/manual/administration/production-notes/#allocate-sufficient-ram-and-cpu for details about why setting the WiredTiger cache size is important when you change the container memory limit from the default value.
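The two restore rows added above interact: `restore_from` decides whether a restore happens at all, while `restore_bucket` only overrides where the archive is read from. A minimal sketch of that resolution order, assuming `gcs` stands for the parsed `backups.gcs` section and using the field names from this patch series (illustrative only, not operator code):

    # Hypothetical helper mirroring the documented fallback rules.
    def resolve_restore_target(gcs):
        if gcs.restore_from is None:
            return None  # no restore requested for this cluster
        # restore_bucket falls back to the regular backup bucket
        bucket = gcs.restore_bucket if gcs.restore_bucket else gcs.bucket
        # "latest" is resolved to the newest archive in the bucket later on
        return bucket, gcs.restore_from

So a spec that only sets `restore_from: "latest"` restores the newest backup from the same bucket the operator writes its backups to.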
diff --git a/tests/TestMongoOperator.py b/tests/TestMongoOperator.py index becb076..e1c8abb 100644 --- a/tests/TestMongoOperator.py +++ b/tests/TestMongoOperator.py @@ -43,4 +43,4 @@ def test_run_with_interrupt(self, checker_mock, sleep_mock): call().checkExistingClusters(), call().collectGarbage(), ] self.assertEqual(expected_calls, checker_mock.mock_calls) - self.assertEqual([call(0.01), call(0.01)], sleep_mock.mock_calls) + self.assertEqual([call(0.01)], sleep_mock.mock_calls) From 59894a8ae4e990c76d48f741fe9329dbd8a7c447 Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Thu, 14 Feb 2019 17:09:10 +0100 Subject: [PATCH 22/36] Utilize TimeoutError instead of SubprocessError for failing restores --- mongoOperator/helpers/RestoreHelper.py | 4 ++-- tests/TestMongoOperator.py | 3 +-- tests/helpers/TestRestoreHelper.py | 2 +- 3 files changed, 4 insertions(+), 5 deletions(-) diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index 9866523..dbaac05 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -5,7 +5,7 @@ import logging import os from base64 import b64decode -from subprocess import check_output, CalledProcessError, SubprocessError +from subprocess import check_output, CalledProcessError from time import sleep from google.cloud.storage import Client as StorageClient @@ -130,7 +130,7 @@ def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) logging.error("Could not restore '%s', attempt %d. Return code: %s stderr: '%s' stdout: '%s'", backup_file, _, err.returncode, err.stderr, err.stdout) sleep(self.RESTORE_WAIT) - raise SubprocessError("Could not restore '{}' after {} retries!".format(backup_file, self.RESTORE_RETRIES)) + raise TimeoutError("Could not restore '{}' after {} retries!".format(backup_file, self.RESTORE_RETRIES)) def _downloadBackup(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) -> str: """ diff --git a/tests/TestMongoOperator.py b/tests/TestMongoOperator.py index e1c8abb..ebb1cda 100644 --- a/tests/TestMongoOperator.py +++ b/tests/TestMongoOperator.py @@ -32,7 +32,6 @@ def test_run(self, checker_mock, sleep_mock): @patch("mongoOperator.MongoOperator.ClusterChecker") def test_run_with_interrupt(self, checker_mock, sleep_mock): sleep_mock.side_effect = None, KeyboardInterrupt # we force stop on the 2nd run - checker_mock.return_value.collectGarbage.side_effect = None, None operator = MongoOperator(sleep_per_run=0.01) operator.run_forever() @@ -43,4 +42,4 @@ def test_run_with_interrupt(self, checker_mock, sleep_mock): call().checkExistingClusters(), call().collectGarbage(), ] self.assertEqual(expected_calls, checker_mock.mock_calls) - self.assertEqual([call(0.01)], sleep_mock.mock_calls) + self.assertEqual([call(0.01), call(0.01)], sleep_mock.mock_calls) diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py index 6c45617..4b512a8 100644 --- a/tests/helpers/TestRestoreHelper.py +++ b/tests/helpers/TestRestoreHelper.py @@ -110,7 +110,7 @@ def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mo subprocess_mock.side_effect = CalledProcessError(3, "cmd", "output", "error") expected_backup_name = "mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz" - with self.assertRaises(SubprocessError) as context: + with self.assertRaises(TimeoutError) as context: self.restore_helper.restore(self.cluster_object, expected_backup_name) self.assertEqual("Could not restore '" + 
                         expected_backup_name + "' after 4 retries!", str(context.exception))

From 6be3b84ced4ce6916f474248b92fbf778b7f8864 Mon Sep 17 00:00:00 2001
From: Rick van den Hof
Date: Mon, 18 Feb 2019 10:38:30 +0100
Subject: [PATCH 23/36] Use a single build-and-deploy script

Add example file for a specific backup to restore.
Only run the callback on the first host.
Increase initial command timeouts, so we give kubernetes a chance to launch
the new pods

---
 build-and-deploy-local.sh                      | 34 ++++++++-----
 ...mongo-3-replicas-from-specific-backup.yaml  | 23 +++++++++
 .../listeners/mongo/HeartbeatListener.py       | 11 ++---
 mongoOperator/services/MongoService.py         |  6 +--
 restore-from-backup-local.sh                   | 48 -------------------
 tests/helpers/TestHeartBeatListener.py         | 17 +++++--
 6 files changed, 64 insertions(+), 75 deletions(-)
 create mode 100644 examples/mongo-3-replicas-from-specific-backup.yaml
 delete mode 100755 restore-from-backup-local.sh

diff --git a/build-and-deploy-local.sh b/build-and-deploy-local.sh
index 2bc96bb..4f7fe4c 100755
--- a/build-and-deploy-local.sh
+++ b/build-and-deploy-local.sh
@@ -1,38 +1,48 @@
 #!/usr/bin/env bash
+set -eo pipefail
+
+EXAMPLE_FILE=${1:-examples/mongo-3-replicas.yaml}
 
 # set the environment of the minikube docker
 eval $(minikube docker-env)
 
+readonly NAMESPACE="mongo-operator-cluster"
+readonly KUBECTL="kubectl --namespace=${NAMESPACE}"
+
 # build the docker image
 docker build --tag ultimaker/k8s-mongo-operator:local .
 
 # print out the Kubernetes client and server versions
-kubectl version
+${KUBECTL} version
+
+if ! kubectl get namespace ${NAMESPACE}; then
+    kubectl create namespace ${NAMESPACE}
+fi
 
 # remove the deployment, if needed, and apply the new one
-kubectl delete deployment mongo-operator 2>/dev/null
-kubectl apply --filename=kubernetes/operators/mongo-operator/service-account.yaml
-kubectl apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml
-kubectl apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml
-kubectl apply --filename=kubernetes/operators/mongo-operator/deployment.yaml
+${KUBECTL} delete deployment mongo-operator 2>/dev/null || true
+${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/service-account.yaml || true
+${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml || true
+${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml || true
+${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/deployment.yaml || true
 
 # show some details about the deployment
-kubectl describe deploy mongo-operator
+${KUBECTL} describe deploy mongo-operator
 
 # create a secret with the google account credentials
-kubectl create secret generic storage-serviceaccount --from-file=json=google_credentials.json
+${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json || true
 
 # wait for the pod to startup to retrieve its name
 sleep 10
-POD_NAME=$(kubectl get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ")
+POD_NAME=$(${KUBECTL} get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ")
 if [ -z $POD_NAME ]; then
     echo "The operator pod is not running!"
- kubectl get pods + ${KUBECTL} get pods exit 1 fi # apply the example file -kubectl apply --filename=examples/mongo-3-replicas.yaml +${KUBECTL} apply --filename=${EXAMPLE_FILE} # show the pod logs -kubectl logs ${POD_NAME} --follow +${KUBECTL} logs ${POD_NAME} --follow diff --git a/examples/mongo-3-replicas-from-specific-backup.yaml b/examples/mongo-3-replicas-from-specific-backup.yaml new file mode 100644 index 0000000..4ce9c6b --- /dev/null +++ b/examples/mongo-3-replicas-from-specific-backup.yaml @@ -0,0 +1,23 @@ +apiVersion: "operators.ultimaker.com/v1" +kind: Mongo +metadata: + name: mongo-cluster + namespace: mongo-operator-cluster +spec: + mongodb: + replicas: 3 # Must be between 3 and 50 + cpu_limit: "200m" + memory_limit: "64Mi" + backups: + cron: "0 * * * *" # every hour at 0 minutes + gcs: + bucket: "ultimaker-mongo-backups" + # Set restore_from to 'latest' to use the last backup created when initializing the replicaset. + restore_from: mongodb-backup-default-mongo-cluster-2019-02-07_132931.archive.gz + # set restore_bucket if the file in restore_from is in another bucket. + # restore_bucket: + prefix: "test-backups" + serviceAccount: + secretKeyRef: + name: storage-serviceaccount + key: json diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py index c20a16a..ef4ac6b 100644 --- a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py +++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py @@ -52,13 +52,10 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None: host_count_found, self._expected_host_count) return - if "info" in event.reply.document and event.reply.document["info"] == self.INVALID_REPLICA_SET_CONFIG: - # The reply indicated that the replica set config was not correct. - logging.debug("The replica set config was not correct: %s", event.reply) - return - - self._all_hosts_ready_callback(self._cluster_object) - self._callback_executed = True + # Only execute the callback on the first host + if list(self._hosts.keys())[0] == event.connection_id: + self._all_hosts_ready_callback(self._cluster_object) + self._callback_executed = True def failed(self, event: ServerHeartbeatFailedEvent) -> None: """ diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 96ef9c6..5b1bf62 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -143,8 +143,8 @@ def _createMongoClientForReplicaSet(self, cluster_object: V1MongoClusterConfigur """ return MongoClient( MongoResources.getMemberHostnames(cluster_object), - connectTimeoutMS = 60000, - serverSelectionTimeoutMS = 60000, + connectTimeoutMS = 120000, + serverSelectionTimeoutMS = 120000, replicaSet = cluster_object.metadata.name, event_listeners = [ CommandLogger(), @@ -176,7 +176,7 @@ def _onAllHostsReady(self, cluster_object: V1MongoClusterConfiguration) -> None: def _executeAdminCommand(self, cluster_object: V1MongoClusterConfiguration, mongo_command: str, *args, **kwargs ) -> Optional[Dict[str, any]]: """ - Executes the given mongo command inside the pod with the given name. + Executes the given mongo command on the MongoDB cluster. Retries a few times in case we receive a handshake failure. :param name: The name of the cluster. :param namespace: The namespace of the cluster. 
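The HeartbeatListener change above is easy to misread: once every expected host has reported a successful heartbeat, the callback must fire exactly once, so it is only triggered when the succeeding host happens to be the first one that was registered. A standalone sketch of that bookkeeping, using simplified names rather than the operator's actual classes:

    # Hypothetical distillation of the HeartbeatListener logic shown above.
    class HeartbeatTracker:
        def __init__(self, expected_host_count, callback):
            self._expected = expected_host_count
            self._hosts = {}  # connection_id -> number of successful heartbeats
            self._callback = callback
            self._callback_executed = False

        def succeeded(self, connection_id):
            self._hosts[connection_id] = self._hosts.get(connection_id, 0) + 1
            if self._callback_executed or len(self._hosts) < self._expected:
                return
            if next(iter(self._hosts)) == connection_id:  # only the first host fires
                self._callback()
                self._callback_executed = True

This is also why the updated TestHeartBeatListener below sends a fourth heartbeat from host-1: the third heartbeat completes the set but arrives from host-3, so only the follow-up from the first host triggers the callback, and it is asserted to run exactly once.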
diff --git a/restore-from-backup-local.sh b/restore-from-backup-local.sh deleted file mode 100755 index 2e04349..0000000 --- a/restore-from-backup-local.sh +++ /dev/null @@ -1,48 +0,0 @@ -#!/usr/bin/env bash -set -e -set -o pipefail - -# set the environment of the minikube docker -eval $(minikube docker-env) - -readonly NAMESPACE="mongo-operator-cluster" -readonly KUBECTL="kubectl --namespace=${NAMESPACE}" - -# build the docker image -docker build --tag ultimaker/k8s-mongo-operator:local . - -# print out the Kubernetes client and server versions -${KUBECTL} version || true - - -if ! kubectl get namespace ${NAMESPACE}; then - kubectl create namespace ${NAMESPACE} -fi - -# remove the deployment, if needed, and apply the new one -${KUBECTL} delete deployment mongo-operator 2>/dev/null || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/service-account.yaml || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/deployment.yaml || true - -# show some details about the deployment -${KUBECTL} describe deploy mongo-operator - -# create a secret with the google account credentials -${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json || true - -# wait for the pod to startup to retrieve its name -sleep 10 -POD_NAME=$(${KUBECTL} get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ") -if [ -z $POD_NAME ]; then - echo "The operator pod is not running!" - ${KUBECTL} get pods - exit 1 -fi - -# apply the example file -${KUBECTL} apply --filename=examples/mongo-3-replicas-from-backup.yaml - -# show the pod logs -${KUBECTL} logs ${POD_NAME} --follow diff --git a/tests/helpers/TestHeartBeatListener.py b/tests/helpers/TestHeartBeatListener.py index 1a5c9d9..33c0303 100644 --- a/tests/helpers/TestHeartBeatListener.py +++ b/tests/helpers/TestHeartBeatListener.py @@ -6,7 +6,6 @@ from unittest.mock import Mock, MagicMock from pymongo.monitoring import ServerHeartbeatStartedEvent, ServerHeartbeatSucceededEvent, ServerHeartbeatFailedEvent -from pymongo.ismaster import IsMaster from mongoOperator.helpers.listeners.mongo.HeartbeatListener import HeartbeatListener from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from tests.test_utils import getExampleClusterDefinition @@ -29,12 +28,20 @@ def test_succeeded(self): heartbeat_logger = HeartbeatListener(self.cluster_object, all_hosts_ready_callback=self._onAllHostsReadyCallback) - # Fake two already successful hosts - heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, MagicMock())) - heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, MagicMock())) - heartbeat_event_mock = MagicMock(spec=ServerHeartbeatSucceededEvent) heartbeat_event_mock.reply.document = {"info": ""} + heartbeat_event_mock.connection_id = "host-1", "27017" + + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock)) + heartbeat_event_mock.connection_id = "host-2", "27017" + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock)) + + heartbeat_event_mock.connection_id = "host-3", "27017" + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock)) + + heartbeat_event_mock.reply.document = {"info": ""} + heartbeat_event_mock.connection_id 
= "host-1", "27017" + heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock)) self._onAllHostsReadyCallback.assert_called_once_with(self.cluster_object) From 4e0a7671b15a6b6b36cd7cd569713cde60c4909e Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Mon, 18 Feb 2019 11:27:59 +0100 Subject: [PATCH 24/36] Rename test file Catch OS error on attempting to remove file (continue anyway) Call __init__ on superclasses --- ...=> mongo-3-replicas-from-latest-backup.yaml} | 0 mongoOperator/helpers/RestoreHelper.py | 17 ++++++++++++----- .../listeners/mongo/HeartbeatListener.py | 1 + .../helpers/listeners/mongo/TopologyListener.py | 2 ++ tests/helpers/TestRestoreHelper.py | 14 ++++++++++++++ tests/test_utils.py | 2 +- 6 files changed, 30 insertions(+), 6 deletions(-) rename examples/{mongo-3-replicas-from-backup.yaml => mongo-3-replicas-from-latest-backup.yaml} (100%) diff --git a/examples/mongo-3-replicas-from-backup.yaml b/examples/mongo-3-replicas-from-latest-backup.yaml similarity index 100% rename from examples/mongo-3-replicas-from-backup.yaml rename to examples/mongo-3-replicas-from-latest-backup.yaml diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index dbaac05..815e489 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -15,6 +15,8 @@ from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration from mongoOperator.services.KubernetesService import KubernetesService +from typing import Dict + class RestoreHelper: """ @@ -25,13 +27,13 @@ class RestoreHelper: RESTORE_RETRIES = 4 RESTORE_WAIT = 15.0 - def __init__(self, kubernetes_service: KubernetesService): + def __init__(self, kubernetes_service: KubernetesService) -> None: """ :param kubernetes_service: The kubernetes service. """ self.kubernetes_service = kubernetes_service - def _getCredentials(self, cluster_object: V1MongoClusterConfiguration) -> dict: + def _getCredentials(self, cluster_object: V1MongoClusterConfiguration) -> Dict[str, any]: """ Retrieves the storage credentials for the given cluster object from the Kubernetes secret as specified in the cluster object. @@ -44,7 +46,7 @@ def _getCredentials(self, cluster_object: V1MongoClusterConfiguration) -> dict: credentials_json = b64decode(credentials_encoded) return json.loads(credentials_json) - def getLastBackup(self, cluster_object: V1MongoClusterConfiguration) -> str: + def getLastBackupStorageObjectName(self, cluster_object: V1MongoClusterConfiguration) -> str: """ Returns the filename of the last backup file in the bucket. :param cluster_object: The cluster object from the YAML file. 
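The hunk above only shows the rename of getLastBackup to getLastBackupStorageObjectName; its body is not visible here. Given the timestamped archive names used throughout these tests (for example mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz), a minimal sketch of how such a "latest backup" lookup could work, assuming `bucket` is a google.cloud.storage Bucket instance:

    # Hypothetical sketch: because the date suffix is zero-padded, the
    # lexicographically greatest object name under the prefix is also the
    # most recently created backup.
    def last_backup_name(bucket, prefix):
        names = [blob.name for blob in bucket.list_blobs(prefix=prefix)]
        if not names:
            raise ValueError("No backup found under prefix '{}'".format(prefix))
        return max(names)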
@@ -92,7 +94,7 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: if cluster_object.spec.backups.gcs.restore_from is not None: backup_file = cluster_object.spec.backups.gcs.restore_from if backup_file == "latest": - backup_file = self.getLastBackup(cluster_object) + backup_file = self.getLastBackupStorageObjectName(cluster_object) logging.info("Attempting to restore file %s to cluster %s @ ns/%s.", backup_file, cluster_object.metadata.name, cluster_object.metadata.namespace) @@ -124,7 +126,12 @@ def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) restore_output = check_output(["mongorestore", "--host", ",".join(hostnames), "--gzip", "--archive=" + downloaded_file]) logging.info("Restore output: %s", restore_output) - os.remove(downloaded_file) + + try: + os.remove(downloaded_file) + except OSError as err: + logging.error("Unable to remove '%s': %s", downloaded_file, err.strerror) + return True except CalledProcessError as err: logging.error("Could not restore '%s', attempt %d. Return code: %s stderr: '%s' stdout: '%s'", diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py index ef4ac6b..76bd038 100644 --- a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py +++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py @@ -17,6 +17,7 @@ class HeartbeatListener(ServerHeartbeatListener): def __init__(self, cluster_object: V1MongoClusterConfiguration, all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: + super().__init__() self._cluster_object: V1MongoClusterConfiguration = cluster_object self._expected_host_count: int = cluster_object.spec.mongodb.replicas self._hosts: Dict[str, int] = {} diff --git a/mongoOperator/helpers/listeners/mongo/TopologyListener.py b/mongoOperator/helpers/listeners/mongo/TopologyListener.py index dfe28a1..4e23d15 100644 --- a/mongoOperator/helpers/listeners/mongo/TopologyListener.py +++ b/mongoOperator/helpers/listeners/mongo/TopologyListener.py @@ -15,6 +15,8 @@ class TopologyListener(MongoTopologyListener): def __init__(self, cluster_object: V1MongoClusterConfiguration, replica_set_ready_callback: Callable[[V1MongoClusterConfiguration], None]) -> None: + super().__init__() + self._cluster_object: V1MongoClusterConfiguration = cluster_object self._replica_set_ready_callback: Callable[[V1MongoClusterConfiguration], None] = replica_set_ready_callback diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py index 4b512a8..4bea878 100644 --- a/tests/helpers/TestRestoreHelper.py +++ b/tests/helpers/TestRestoreHelper.py @@ -117,6 +117,20 @@ def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mo self.assertEqual(4, subprocess_mock.call_count) # TODO: assert calls on unused mocks + @patch("mongoOperator.helpers.RestoreHelper.os") + @patch("mongoOperator.helpers.RestoreHelper.StorageClient") + @patch("mongoOperator.helpers.RestoreHelper.ServiceCredentials") + @patch("mongoOperator.helpers.RestoreHelper.check_output") + def test_restore_os_error(self, subprocess_mock, gcs_service_mock, storage_mock, os_mock): + expected_backup_name = "mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz" + os_mock.remove.side_effect = OSError() + + self.restore_helper.restore(self.cluster_object, expected_backup_name) + + os_mock.remove.assert_called_with("/tmp/" + expected_backup_name) + self.assertEqual(1, subprocess_mock.call_count) + # 
TODO: assert calls on unused mocks + @patch("mongoOperator.helpers.RestoreHelper.check_output") def test_restore_gcs_bad_credentials(self, subprocess_mock): expected_backup_name = "mongodb-backup-mongo-cluster-mongo-cluster-2018-02-28_140000.archive.gz" diff --git a/tests/test_utils.py b/tests/test_utils.py index f8cc321..c8de995 100644 --- a/tests/test_utils.py +++ b/tests/test_utils.py @@ -10,7 +10,7 @@ def getExampleClusterDefinition(replicas = 3) -> dict: def getExampleClusterDefinitionWithRestore() -> dict: - with open("./examples/mongo-3-replicas-from-backup.yaml") as f: + with open("./examples/mongo-3-replicas-from-latest-backup.yaml") as f: return yaml.load(f) From ade01c472005b20633932f3ebb8641631eb0f251 Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Mon, 18 Feb 2019 11:34:35 +0100 Subject: [PATCH 25/36] Final processing of comments --- mongoOperator/helpers/RestoreHelper.py | 20 ++++++++++---------- 1 file changed, 10 insertions(+), 10 deletions(-) diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py index 815e489..fe0655d 100644 --- a/mongoOperator/helpers/RestoreHelper.py +++ b/mongoOperator/helpers/RestoreHelper.py @@ -91,18 +91,18 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool: :param cluster_object: The cluster object from the YAML file. :return: Whether a restore was executed or not. """ - if cluster_object.spec.backups.gcs.restore_from is not None: - backup_file = cluster_object.spec.backups.gcs.restore_from - if backup_file == "latest": - backup_file = self.getLastBackupStorageObjectName(cluster_object) + if cluster_object.spec.backups.gcs.restore_from is None: + return False - logging.info("Attempting to restore file %s to cluster %s @ ns/%s.", backup_file, - cluster_object.metadata.name, cluster_object.metadata.namespace) + backup_file = cluster_object.spec.backups.gcs.restore_from + if backup_file == "latest": + backup_file = self.getLastBackupStorageObjectName(cluster_object) - self.restore(cluster_object, backup_file) - return True + logging.info("Attempting to restore file %s to cluster %s @ ns/%s.", backup_file, + cluster_object.metadata.name, cluster_object.metadata.namespace) - return False + self.restore(cluster_object, backup_file) + return True def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) -> bool: """ @@ -119,8 +119,8 @@ def restore(self, cluster_object: V1MongoClusterConfiguration, backup_file: str) # Download the backup file from the bucket downloaded_file = self._downloadBackup(cluster_object, backup_file) + # Wait for the replica set to become ready for _ in range(self.RESTORE_RETRIES): - # Wait for the replica set to become ready try: logging.info("Running mongorestore --host %s --gzip --archive=%s", ",".join(hostnames), downloaded_file) restore_output = check_output(["mongorestore", "--host", ",".join(hostnames), "--gzip", From e4ad911ba4541e86590f6ea0b2b2367bdff6ff24 Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Tue, 19 Feb 2019 11:50:01 +0100 Subject: [PATCH 26/36] Check if the user exists before creating it --- mongoOperator/services/MongoService.py | 17 ++++++++++++++--- tests/services/TestMongoService.py | 3 +++ 2 files changed, 17 insertions(+), 3 deletions(-) diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 5b1bf62..8ad4a2f 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -87,9 +87,20 @@ def createUsers(self, cluster_object: 
V1MongoClusterConfiguration) -> None: admin_credentials = self._kubernetes_service.getSecret(secret_name, namespace) create_admin_command, create_admin_args, create_admin_kwargs = MongoResources.createCreateAdminCommand( admin_credentials) - create_admin_response = self._executeAdminCommand(cluster_object, create_admin_command, create_admin_args, - **create_admin_kwargs) - logging.info("Created admin user: %s", create_admin_response) + + if not self.userExists(cluster_object, create_admin_args): + create_admin_response = self._executeAdminCommand(cluster_object, create_admin_command, create_admin_args, + **create_admin_kwargs) + logging.info("Created admin user: %s", create_admin_response) + else: + logging.info("No need to create admin user, it already exists") + + def userExists(self, cluster_object: V1MongoClusterConfiguration, username: str) -> bool: + name = cluster_object.metadata.name + if name not in self._connected_replica_sets: + self._connected_replica_sets[name] = self._createMongoClientForReplicaSet(cluster_object) + + return self._connected_replica_sets[name].system.users.find_one({"user": username}) is not None def _reconfigureReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None: """ diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py index ebd5e4a..eef63f2 100644 --- a/tests/services/TestMongoService.py +++ b/tests/services/TestMongoService.py @@ -200,10 +200,12 @@ def test_checkOrCreateReplicaSet_OperationalFailure(self, mongo_client_mock): self.assertEqual(str(context.exception), bad_value) def test_createUsers_ok(self, mongo_client_mock): + mongo_client_mock.return_value.system.users.find_one.return_value = None mongo_client_mock.return_value.admin.command.return_value = self._getFixture("createUser-ok") self.service.createUsers(self.cluster_object) def test_createUsers_ValueError(self, mongo_client_mock): + mongo_client_mock.return_value.system.users.find_one.return_value = None mongo_client_mock.return_value.admin.command.side_effect = OperationFailure( "\"createUser\" had the wrong type. Expected string, found object"), @@ -213,6 +215,7 @@ def test_createUsers_ValueError(self, mongo_client_mock): self.assertEqual("\"createUser\" had the wrong type. Expected string, found object", str(context.exception)) def test_createUsers_TimeoutError(self, mongo_client_mock): + mongo_client_mock.return_value.system.users.find_one.return_value = None mongo_client_mock.return_value.admin.command.side_effect = ( ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed") From 893131a5037d47fc537cef35efe4444a06eec18f Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Tue, 19 Feb 2019 13:11:17 +0100 Subject: [PATCH 27/36] Fix findUsers command and don't run when google_credentials are missing --- build-and-deploy-local.sh | 5 +++++ .../operators/mongo-operator/deployment.yaml | 2 +- mongoOperator/helpers/MongoResources.py | 14 ++++++++++++++ mongoOperator/services/MongoService.py | 9 ++++----- tests/services/TestMongoService.py | 10 ++++------ 5 files changed, 28 insertions(+), 12 deletions(-) diff --git a/build-and-deploy-local.sh b/build-and-deploy-local.sh index 4f7fe4c..21a2137 100755 --- a/build-and-deploy-local.sh +++ b/build-and-deploy-local.sh @@ -3,6 +3,11 @@ set -eo pipefail EXAMPLE_FILE=${1:-examples/mongo-3-replicas.yaml} +if ! 
[ -e "google_credentials.json" ]; then
+    echo "google_credentials.json file is missing, aborting."
+    exit 1
+fi
+
 # set the environment of the minikube docker
 eval $(minikube docker-env)
 
diff --git a/kubernetes/operators/mongo-operator/deployment.yaml b/kubernetes/operators/mongo-operator/deployment.yaml
index bcf725e..5b766ce 100644
--- a/kubernetes/operators/mongo-operator/deployment.yaml
+++ b/kubernetes/operators/mongo-operator/deployment.yaml
@@ -26,5 +26,5 @@ spec:
         name: mongo-operator
         env:
         - name: LOGGING_LEVEL
-          value: INFO
+          value: DEBUG
       serviceAccount: mongo-operator-service-account
diff --git a/mongoOperator/helpers/MongoResources.py b/mongoOperator/helpers/MongoResources.py
index 82998e8..50e9d3b 100644
--- a/mongoOperator/helpers/MongoResources.py
+++ b/mongoOperator/helpers/MongoResources.py
@@ -74,6 +74,20 @@ def createCreateAdminCommand(cls, admin_credentials: client.V1Secret)\
         }
         return "createUser", admin_username, kwargs
 
+    @classmethod
+    def createFindAdminCommand(cls, admin_username: str) \
+            -> Tuple[str, Dict[str, Union[List[Dict[str, str]], Any]]]:
+        """
+        Creates a MongoDB command that looks up an administrator user.
+        :param admin_username: The admin username we're looking for.
+        :return: The command to be sent to MongoDB.
+        """
+        kwargs = {
+            "user": admin_username,
+            "db": "admin"
+        }
+        return "usersInfo", kwargs
+
     @classmethod
     def createStatusCommand(cls) -> str:
         """
diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py
index 8ad4a2f..c5f93b7 100644
--- a/mongoOperator/services/MongoService.py
+++ b/mongoOperator/services/MongoService.py
@@ -96,11 +96,10 @@ def createUsers(self, cluster_object: V1MongoClusterConfiguration) -> None:
             logging.info("No need to create admin user, it already exists")
 
     def userExists(self, cluster_object: V1MongoClusterConfiguration, username: str) -> bool:
-        name = cluster_object.metadata.name
-        if name not in self._connected_replica_sets:
-            self._connected_replica_sets[name] = self._createMongoClientForReplicaSet(cluster_object)
-
-        return self._connected_replica_sets[name].system.users.find_one({"user": username}) is not None
+        find_admin_command, find_admin_kwargs = MongoResources.createFindAdminCommand(username)
+        find_result = self._executeAdminCommand(cluster_object, find_admin_command, find_admin_kwargs)
+        logging.debug("Result of user find_one is %s", repr(find_result))
+        return find_result is not None
 
     def _reconfigureReplicaSet(self, cluster_object: V1MongoClusterConfiguration) -> None:
         """
diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py
index eef63f2..5b18b0a 100644
--- a/tests/services/TestMongoService.py
+++ b/tests/services/TestMongoService.py
@@ -200,14 +200,12 @@ def test_checkOrCreateReplicaSet_OperationalFailure(self, mongo_client_mock):
         self.assertEqual(str(context.exception), bad_value)
 
     def test_createUsers_ok(self, mongo_client_mock):
-        mongo_client_mock.return_value.system.users.find_one.return_value = None
-        mongo_client_mock.return_value.admin.command.return_value = self._getFixture("createUser-ok")
+        mongo_client_mock.return_value.admin.command.side_effect = (None, self._getFixture("createUser-ok"))
         self.service.createUsers(self.cluster_object)
 
     def test_createUsers_ValueError(self, mongo_client_mock):
-        mongo_client_mock.return_value.system.users.find_one.return_value = None
-        mongo_client_mock.return_value.admin.command.side_effect = OperationFailure(
-            "\"createUser\" had the wrong type. Expected string, found object"),
+        mongo_client_mock.return_value.admin.command.side_effect = (None, OperationFailure(
+            "\"createUser\" had the wrong type. Expected string, found object"))
 
         with self.assertRaises(OperationFailure) as context:
             self.service.createUsers(self.cluster_object)
@@ -215,8 +213,8 @@ def test_createUsers_ValueError(self, mongo_client_mock):
         self.assertEqual("\"createUser\" had the wrong type. Expected string, found object", str(context.exception))
 
     def test_createUsers_TimeoutError(self, mongo_client_mock):
-        mongo_client_mock.return_value.system.users.find_one.return_value = None
         mongo_client_mock.return_value.admin.command.side_effect = (
+            None,
             ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed"),
             ConnectionFailure("connection attempt failed"), ConnectionFailure("connection attempt failed")
         )

From ccc5def77c060c0c82085fc04c9816fe32c99966 Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Tue, 19 Feb 2019 13:13:58 +0100
Subject: [PATCH 28/36] Delete secret before trying to create it

---
 build-and-deploy-local.sh | 1 +
 1 file changed, 1 insertion(+)

diff --git a/build-and-deploy-local.sh b/build-and-deploy-local.sh
index 4f7fe4c..79c8945 100755
--- a/build-and-deploy-local.sh
+++ b/build-and-deploy-local.sh
@@ -30,6 +30,7 @@ ${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/deployment.yaml
 ${KUBECTL} describe deploy mongo-operator
 
 # create a secret with the google account credentials
+${KUBECTL} delete secret storage-serviceaccount || true
 ${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json || true
 
 # wait for the pod to startup to retrieve its name
 sleep 10

From e0515d6d88570bd8623d80133f966276f235f3b2 Mon Sep 17 00:00:00 2001
From: Rick van den Hof
Date: Tue, 19 Feb 2019 14:20:52 +0100
Subject: [PATCH 29/36] Fix creating the replicaSet when it's not necessary

Also remove unneeded "|| true" statements and delete the google secret if it
already exists.

---
 build-and-deploy-local.sh              | 16 +++++++++-------
 mongoOperator/services/MongoService.py |  2 +-
 tests/services/TestMongoService.py     |  4 ++--
 3 files changed, 12 insertions(+), 10 deletions(-)

diff --git a/build-and-deploy-local.sh b/build-and-deploy-local.sh
index 4f5a90e..a92611d 100755
--- a/build-and-deploy-local.sh
+++ b/build-and-deploy-local.sh
@@ -25,18 +25,20 @@ if !
kubectl get namespace ${NAMESPACE}; then fi # remove the deployment, if needed, and apply the new one -${KUBECTL} delete deployment mongo-operator 2>/dev/null || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/service-account.yaml || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml || true -${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/deployment.yaml || true +${KUBECTL} delete deployment mongo-operator 2>/dev/null +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/service-account.yaml +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml +${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/deployment.yaml # show some details about the deployment ${KUBECTL} describe deploy mongo-operator # create a secret with the google account credentials -${KUBECTL} delete secret storage-serviceaccount || true -${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json || true +if ${KUBECTL} get secret storage-serviceaccount 1>/dev/null; then + ${KUBECTL} delete secret storage-serviceaccount +fi +${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json # wait for the pod to startup to retrieve its name sleep 10 diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index c5f93b7..8bc2238 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -181,7 +181,7 @@ def _onAllHostsReady(self, cluster_object: V1MongoClusterConfiguration) -> None: Callback triggered when all hosts in the would-be replica set are available. :param cluster_object: The cluster configuration object for the hosts in the would-be replica set. 
""" - self._initializeReplicaSet(cluster_object) + self.checkOrCreateReplicaSet(cluster_object) def _executeAdminCommand(self, cluster_object: V1MongoClusterConfiguration, mongo_command: str, *args, **kwargs ) -> Optional[Dict[str, any]]: diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py index 5b18b0a..27e6602 100644 --- a/tests/services/TestMongoService.py +++ b/tests/services/TestMongoService.py @@ -242,9 +242,9 @@ def test_onReplicaSetReady_alreadyRestored(self, mongo_client_mock): mongo_client_mock.assert_not_called() def test_onAllHostsReady(self, mongo_client_mock): - self.service._initializeReplicaSet = MagicMock() + self.service.checkOrCreateReplicaSet = MagicMock() self.service._onAllHostsReady(self.cluster_object) - self.service._initializeReplicaSet.assert_called() + self.service.checkOrCreateReplicaSet.assert_called() mongo_client_mock.assert_not_called() From 45f3b3a7a78dace7897deb50f8103b5331d35079 Mon Sep 17 00:00:00 2001 From: Rick van den Hof Date: Tue, 19 Feb 2019 15:26:40 +0100 Subject: [PATCH 30/36] Add missing docstrings --- .pylintrc | 4 ++-- .../helpers/resourceCheckers/AdminSecretChecker.py | 1 + mongoOperator/services/MongoService.py | 6 ++++++ 3 files changed, 9 insertions(+), 2 deletions(-) diff --git a/.pylintrc b/.pylintrc index c301f38..ca97c02 100644 --- a/.pylintrc +++ b/.pylintrc @@ -35,7 +35,7 @@ max-nested-blocks=3 [MESSAGES CONTROL] # C0326: No space allowed around keyword argument assignment -# C0111: Missing module docstring (missing-docstring) +# C0111: Missing module docstring (because we use a file per class, this would lead to duplicate docstrings) # C0411: Ignore import order because the rules are different than in PyCharm, so automatic imports break lots of builds # R0201: Method could be a function (no-self-use) # R0401: Cyclic imports (cyclic-import) are used for typing @@ -99,4 +99,4 @@ defining-attr-methods=__init__,__new__,setUp,initialize ignored-classes=NotImplemented [VARIABLES] -dummy-variables-rgx=_+[a-z0-9_]{2,30} \ No newline at end of file +dummy-variables-rgx=_+[a-z0-9_]{2,30} diff --git a/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py b/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py index d5b3332..d3247fb 100644 --- a/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py +++ b/mongoOperator/helpers/resourceCheckers/AdminSecretChecker.py @@ -26,6 +26,7 @@ def getClusterName(cls, resource_name: str) -> str: @classmethod def getSecretName(cls, cluster_name: str) -> str: + """ Returns the correctly formatted name of the secret for this cluster.""" return cls.NAME_FORMAT.format(cluster_name) @staticmethod diff --git a/mongoOperator/services/MongoService.py b/mongoOperator/services/MongoService.py index 8bc2238..2a872b0 100644 --- a/mongoOperator/services/MongoService.py +++ b/mongoOperator/services/MongoService.py @@ -96,6 +96,12 @@ def createUsers(self, cluster_object: V1MongoClusterConfiguration) -> None: logging.info("No need to create admin user, it already exists") def userExists(self, cluster_object: V1MongoClusterConfiguration, username: str) -> bool: + """ + Runs a Mongo command to determine whether the specified user exists in this cluster. + :param cluster_object: The cluster object from the YAML file. + :param username: The user we want to lookup. + :return: A boolean value indicating whether the user exists. 
+ """ find_admin_command, find_admin_kwargs = MongoResources.createFindAdminCommand(username) find_result = self._executeAdminCommand(cluster_object, find_admin_command, find_admin_kwargs) logging.debug("Result of user find_one is %s", repr(find_result)) From 51464e00f14b65cd1eb342aeda329356412829e2 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Tue, 19 Feb 2019 20:50:55 +0100 Subject: [PATCH 31/36] Small changes to make it work on MacOS with Docker for Mac --- build-and-deploy-local.sh | 4 ++-- examples/mongo-3-replicas-from-latest-backup.yaml | 2 ++ examples/mongo-3-replicas-from-specific-backup.yaml | 2 ++ examples/mongo-3-replicas.yaml | 2 ++ examples/mongo-5-replicas.yaml | 2 ++ tests/models/TestV1MongoClusterConfiguration.py | 3 ++- 6 files changed, 12 insertions(+), 3 deletions(-) diff --git a/build-and-deploy-local.sh b/build-and-deploy-local.sh index a92611d..44109a8 100755 --- a/build-and-deploy-local.sh +++ b/build-and-deploy-local.sh @@ -25,7 +25,7 @@ if ! kubectl get namespace ${NAMESPACE}; then fi # remove the deployment, if needed, and apply the new one -${KUBECTL} delete deployment mongo-operator 2>/dev/null +${KUBECTL} delete deployment mongo-operator 1>/dev/null ${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/service-account.yaml ${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role.yaml ${KUBECTL} apply --filename=kubernetes/operators/mongo-operator/cluster-role-binding.yaml @@ -42,7 +42,7 @@ ${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_ # wait for the pod to startup to retrieve its name sleep 10 -POD_NAME=$(${KUBECTL} get pods | grep -e "mongo-operator.*Running" | cut --fields=1 --delimiter=" ") +POD_NAME=$(${KUBECTL} get pod -l app=mongo-operator -o jsonpath="{.items[0].metadata.name}") if [ -z $POD_NAME ]; then echo "The operator pod is not running!" 
${KUBECTL} get pods diff --git a/examples/mongo-3-replicas-from-latest-backup.yaml b/examples/mongo-3-replicas-from-latest-backup.yaml index a69b78d..07f6de4 100644 --- a/examples/mongo-3-replicas-from-latest-backup.yaml +++ b/examples/mongo-3-replicas-from-latest-backup.yaml @@ -3,6 +3,8 @@ kind: Mongo metadata: name: mongo-cluster namespace: mongo-operator-cluster + labels: + app: mongo-cluster spec: mongodb: replicas: 3 # Must be between 3 and 50 diff --git a/examples/mongo-3-replicas-from-specific-backup.yaml b/examples/mongo-3-replicas-from-specific-backup.yaml index 4ce9c6b..c1e79ea 100644 --- a/examples/mongo-3-replicas-from-specific-backup.yaml +++ b/examples/mongo-3-replicas-from-specific-backup.yaml @@ -3,6 +3,8 @@ kind: Mongo metadata: name: mongo-cluster namespace: mongo-operator-cluster + labels: + app: mongo-cluster spec: mongodb: replicas: 3 # Must be between 3 and 50 diff --git a/examples/mongo-3-replicas.yaml b/examples/mongo-3-replicas.yaml index cd7115f..5718e86 100644 --- a/examples/mongo-3-replicas.yaml +++ b/examples/mongo-3-replicas.yaml @@ -3,6 +3,8 @@ kind: Mongo metadata: name: mongo-cluster namespace: mongo-operator-cluster + labels: + app: mongo-cluster spec: mongodb: replicas: 3 # Must be between 3 and 50 diff --git a/examples/mongo-5-replicas.yaml b/examples/mongo-5-replicas.yaml index eda315a..b79f417 100644 --- a/examples/mongo-5-replicas.yaml +++ b/examples/mongo-5-replicas.yaml @@ -3,6 +3,8 @@ kind: Mongo metadata: name: mongo-cluster namespace: mongo-operator-cluster + labels: + app: mongo-cluster spec: mongodb: replicas: 5 # Must be between 3 and 50 diff --git a/tests/models/TestV1MongoClusterConfiguration.py b/tests/models/TestV1MongoClusterConfiguration.py index 5ef1305..6926518 100644 --- a/tests/models/TestV1MongoClusterConfiguration.py +++ b/tests/models/TestV1MongoClusterConfiguration.py @@ -62,7 +62,8 @@ def test_equals(self): def test_example_repr(self): expected = \ "V1MongoClusterConfiguration(api_version=operators.ultimaker.com/v1, kind=Mongo, " \ - "metadata={'name': 'mongo-cluster', 'namespace': '" + self.cluster_object.metadata.namespace + "'}, " \ + "metadata={'labels': {'app': 'mongo-cluster'}, 'name': 'mongo-cluster', 'namespace': '" \ + + self.cluster_object.metadata.namespace + "'}, " \ "spec={'backups': {'cron': '0 * * * *', 'gcs': {'bucket': 'ultimaker-mongo-backups', " \ "'prefix': 'test-backups', 'service_account': {'secret_key_ref': {'key': 'json', " \ "'name': 'storage-serviceaccount'}}}}, 'mongodb': {'cpu_limit': '100m', 'memory_limit': '64Mi', " \ From 43fe425bf2e565109625e814cc7e14dcce5fa2e9 Mon Sep 17 00:00:00 2001 From: ChrisTerBeke Date: Wed, 20 Feb 2019 09:05:49 +0100 Subject: [PATCH 32/36] Remove casting in tests --- tests/helpers/TestAdminSecretChecker.py | 4 +--- tests/helpers/TestBackupChecker.py | 4 +--- tests/helpers/TestBaseResourceChecker.py | 4 +--- tests/helpers/TestCommandLogger.py | 9 +++------ tests/helpers/TestHeartBeatListener.py | 17 ++++++++--------- tests/helpers/TestRestoreHelper.py | 6 ++---- tests/helpers/TestServerLogger.py | 9 +++------ tests/helpers/TestServiceChecker.py | 4 +--- tests/helpers/TestStatefulSetChecker.py | 4 +--- tests/helpers/TestTopologyListener.py | 11 ++++------- tests/services/TestMongoService.py | 6 ++---- 11 files changed, 27 insertions(+), 51 deletions(-) diff --git a/tests/helpers/TestAdminSecretChecker.py b/tests/helpers/TestAdminSecretChecker.py index 77edacf..7c7a6ec 100644 --- a/tests/helpers/TestAdminSecretChecker.py +++ b/tests/helpers/TestAdminSecretChecker.py @@ -1,13 
+1,11 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase
 from unittest.mock import MagicMock, patch

 from mongoOperator.helpers.resourceCheckers.AdminSecretChecker import AdminSecretChecker
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinition

@@ -16,7 +14,7 @@ class TestAdminSecretChecker(TestCase):
     def setUp(self):
         super().setUp()
         self.kubernetes_service = MagicMock()
-        self.checker = AdminSecretChecker(cast(KubernetesService, self.kubernetes_service))
+        self.checker = AdminSecretChecker(self.kubernetes_service)
         self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition())

         self.secret_name = self.cluster_object.metadata.name + "-admin-credentials"
diff --git a/tests/helpers/TestBackupChecker.py b/tests/helpers/TestBackupChecker.py
index 5e05f6d..653dd66 100644
--- a/tests/helpers/TestBackupChecker.py
+++ b/tests/helpers/TestBackupChecker.py
@@ -3,7 +3,6 @@
 # -*- coding: utf-8 -*-
 import json
 from base64 import b64encode
-from typing import cast

 from kubernetes.client import V1Secret
 from subprocess import CalledProcessError, SubprocessError
@@ -14,7 +13,6 @@
 from mongoOperator.helpers.BackupHelper import BackupHelper
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinition

@@ -23,7 +21,7 @@ def setUp(self):
         self.cluster_dict = getExampleClusterDefinition()
         self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

         self.kubernetes_service = MagicMock()
-        self.checker = BackupHelper(cast(KubernetesService, self.kubernetes_service))
+        self.checker = BackupHelper(self.kubernetes_service)
         self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode())
         self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials})
diff --git a/tests/helpers/TestBaseResourceChecker.py b/tests/helpers/TestBaseResourceChecker.py
index 5856066..3f0347e 100644
--- a/tests/helpers/TestBaseResourceChecker.py
+++ b/tests/helpers/TestBaseResourceChecker.py
@@ -1,7 +1,6 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase
 from unittest.mock import MagicMock, call

@@ -9,7 +8,6 @@
 from mongoOperator.helpers.resourceCheckers.BaseResourceChecker import BaseResourceChecker
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinition

@@ -18,7 +16,7 @@ class TestBaseResourceChecker(TestCase):
     def setUp(self):
         self.kubernetes_service = MagicMock()
-        self.checker = BaseResourceChecker(cast(KubernetesService, self.kubernetes_service))
+        self.checker = BaseResourceChecker(self.kubernetes_service)
         self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition())

     def test_getClusterName(self):
diff --git a/tests/helpers/TestCommandLogger.py b/tests/helpers/TestCommandLogger.py
index 6183a5d..dd55c90 100644
--- a/tests/helpers/TestCommandLogger.py
+++ b/tests/helpers/TestCommandLogger.py
@@ -1,11 +1,8 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase

-from pymongo.monitoring import CommandStartedEvent, CommandFailedEvent, CommandSucceededEvent
-
 from mongoOperator.helpers.listeners.mongo.CommandLogger import CommandLogger

@@ -21,10 +18,10 @@ class TestCommandLogger(TestCase):
     command_logger = CommandLogger()

     def test_started(self):
-        self.command_logger.started(event = cast(CommandStartedEvent, CommandEventMock()))
+        self.command_logger.started(event=CommandEventMock())

     def test_succeeded(self):
-        self.command_logger.succeeded(event = cast(CommandSucceededEvent, CommandEventMock()))
+        self.command_logger.succeeded(event=CommandEventMock())

     def test_failed(self):
-        self.command_logger.failed(event = cast(CommandFailedEvent, CommandEventMock()))
+        self.command_logger.failed(event=CommandEventMock())
diff --git a/tests/helpers/TestHeartBeatListener.py b/tests/helpers/TestHeartBeatListener.py
index 33c0303..d938670 100644
--- a/tests/helpers/TestHeartBeatListener.py
+++ b/tests/helpers/TestHeartBeatListener.py
@@ -1,7 +1,6 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase
 from unittest.mock import Mock, MagicMock

@@ -22,7 +21,7 @@
     def test_started(self):
         heartbeat_logger = HeartbeatListener(self.cluster_object,
                                              all_hosts_ready_callback=self._onAllHostsReadyCallback)

-        heartbeat_logger.started(event=cast(ServerHeartbeatStartedEvent, Mock(spec=ServerHeartbeatStartedEvent)))
+        heartbeat_logger.started(event=Mock(spec=ServerHeartbeatStartedEvent))

     def test_succeeded(self):
         heartbeat_logger = HeartbeatListener(self.cluster_object,
@@ -32,17 +31,17 @@ def test_succeeded(self):
         heartbeat_event_mock.reply.document = {"info": ""}
         heartbeat_event_mock.connection_id = "host-1", "27017"

-        heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock))
+        heartbeat_logger.succeeded(event=heartbeat_event_mock)

         heartbeat_event_mock.connection_id = "host-2", "27017"
-        heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock))
+        heartbeat_logger.succeeded(event=heartbeat_event_mock)

         heartbeat_event_mock.connection_id = "host-3", "27017"
-        heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock))
+        heartbeat_logger.succeeded(event=heartbeat_event_mock)

         heartbeat_event_mock.reply.document = {"info": ""}
         heartbeat_event_mock.connection_id = "host-1", "27017"
-        heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock))
+        heartbeat_logger.succeeded(event=heartbeat_event_mock)

         self._onAllHostsReadyCallback.assert_called_once_with(self.cluster_object)

@@ -56,16 +55,16 @@ def test_succeeded_invalid_replicaSet(self):
         # Call it with invalid replicaSet configuration
         heartbeat_event_mock = MagicMock(spec=ServerHeartbeatSucceededEvent)
         heartbeat_event_mock.reply.document = {"info": "Does not have a valid replica set config"}
-        heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, heartbeat_event_mock))
+        heartbeat_logger.succeeded(event=heartbeat_event_mock)

     def test_succeeded_already_called(self):
         heartbeat_logger = HeartbeatListener(self.cluster_object,
                                              all_hosts_ready_callback=self._onAllHostsReadyCallback)

         heartbeat_logger._callback_executed = True
-        heartbeat_logger.succeeded(event=cast(ServerHeartbeatSucceededEvent, MagicMock()))
+        heartbeat_logger.succeeded(event=MagicMock())

     def test_failed(self):
         heartbeat_logger = HeartbeatListener(self.cluster_object,
                                              all_hosts_ready_callback=self._onAllHostsReadyCallback)

-        heartbeat_logger.failed(event=cast(ServerHeartbeatFailedEvent, Mock(spec=ServerHeartbeatFailedEvent)))
+        heartbeat_logger.failed(event=Mock(spec=ServerHeartbeatFailedEvent))
diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py
index 4bea878..1be1a01 100644
--- a/tests/helpers/TestRestoreHelper.py
+++ b/tests/helpers/TestRestoreHelper.py
@@ -3,17 +3,15 @@
 # -*- coding: utf-8 -*-
 import json
 from base64 import b64encode
-from typing import cast

 from kubernetes.client import V1Secret
-from subprocess import CalledProcessError, SubprocessError
+from subprocess import CalledProcessError

 from unittest import TestCase
 from unittest.mock import MagicMock, patch, call

 from mongoOperator.helpers.RestoreHelper import RestoreHelper
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinitionWithRestore, getExampleClusterDefinition

@@ -29,7 +27,7 @@ def setUp(self):
         self.cluster_dict = getExampleClusterDefinitionWithRestore()
         self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

         self.kubernetes_service = MagicMock()
-        self.restore_helper = RestoreHelper(cast(KubernetesService, self.kubernetes_service))
+        self.restore_helper = RestoreHelper(self.kubernetes_service)
         self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode())
         self.kubernetes_service.getSecret.return_value = V1Secret(data={"json": self.dummy_credentials})
diff --git a/tests/helpers/TestServerLogger.py b/tests/helpers/TestServerLogger.py
index ebcc8fc..2004b3b 100644
--- a/tests/helpers/TestServerLogger.py
+++ b/tests/helpers/TestServerLogger.py
@@ -1,11 +1,8 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase

-from pymongo.monitoring import ServerOpeningEvent, ServerClosedEvent, ServerDescriptionChangedEvent
-
 from mongoOperator.helpers.listeners.mongo.ServerLogger import ServerLogger

@@ -26,12 +23,12 @@ class TestServerLogger(TestCase):
     server_logger = ServerLogger()

     def test_opened(self):
-        self.server_logger.opened(event=cast(ServerOpeningEvent, ServerEventMock()))
+        self.server_logger.opened(event=ServerEventMock())

     def test_closed(self):
-        self.server_logger.closed(event=cast(ServerClosedEvent, ServerEventMock()))
+        self.server_logger.closed(event=ServerEventMock())

     def test_description_changed(self):
         serverEventMock = ServerEventMock()
         serverEventMock.new_description.server_type = "bar"
-        self.server_logger.description_changed(event=cast(ServerDescriptionChangedEvent, serverEventMock))
+        self.server_logger.description_changed(event=serverEventMock)
diff --git a/tests/helpers/TestServiceChecker.py b/tests/helpers/TestServiceChecker.py
index 2272650..eeace8c 100644
--- a/tests/helpers/TestServiceChecker.py
+++ b/tests/helpers/TestServiceChecker.py
@@ -1,13 +1,11 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase
 from unittest.mock import MagicMock

 from mongoOperator.helpers.resourceCheckers.ServiceChecker import ServiceChecker
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinition

@@ -16,7 +14,7 @@ class TestServiceChecker(TestCase):
     def setUp(self):
         super().setUp()
         self.kubernetes_service = MagicMock()
-        self.checker = ServiceChecker(cast(KubernetesService, self.kubernetes_service))
+        self.checker = ServiceChecker(self.kubernetes_service)
         self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition())

     def test_listResources(self):
diff --git a/tests/helpers/TestStatefulSetChecker.py b/tests/helpers/TestStatefulSetChecker.py
index a81ad83..5587a17 100644
--- a/tests/helpers/TestStatefulSetChecker.py
+++ b/tests/helpers/TestStatefulSetChecker.py
@@ -1,13 +1,11 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase
 from unittest.mock import MagicMock

 from mongoOperator.helpers.resourceCheckers.StatefulSetChecker import StatefulSetChecker
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from tests.test_utils import getExampleClusterDefinition

@@ -16,7 +14,7 @@ class TestStatefulSetChecker(TestCase):
     def setUp(self):
         super().setUp()
         self.kubernetes_service = MagicMock()
-        self.checker = StatefulSetChecker(cast(KubernetesService, self.kubernetes_service))
+        self.checker = StatefulSetChecker(self.kubernetes_service)
         self.cluster_object = V1MongoClusterConfiguration(**getExampleClusterDefinition())

     def test_listResources(self):
diff --git a/tests/helpers/TestTopologyListener.py b/tests/helpers/TestTopologyListener.py
index d5d3e5a..b3df745 100644
--- a/tests/helpers/TestTopologyListener.py
+++ b/tests/helpers/TestTopologyListener.py
@@ -1,7 +1,6 @@
 # Copyright (c) 2018 Ultimaker
 # !/usr/bin/env python
 # -*- coding: utf-8 -*-
-from typing import cast
 from unittest import TestCase
 from unittest.mock import Mock, MagicMock

@@ -22,7 +21,7 @@
     def test_opened(self):
         topology_logger = TopologyListener(self.cluster_object,
                                            replica_set_ready_callback=self._onReplicaSetReadyCallback)

-        topology_logger.opened(event=cast(TopologyOpenedEvent, Mock(spec=TopologyOpenedEvent)))
+        topology_logger.opened(event=Mock(spec=TopologyOpenedEvent))

     def test_description_changed(self):
         topology_logger = TopologyListener(self.cluster_object,
@@ -32,8 +31,7 @@ def test_description_changed(self):
         topology_description_changed_event_mock.new_description.topology_type = "foo"
         topology_description_changed_event_mock.new_description.has_writable_server.return_value = False
         topology_description_changed_event_mock.new_description.has_readable_server.return_value = False
-        topology_logger.description_changed(event=cast(TopologyDescriptionChangedEvent,
-                                                       topology_description_changed_event_mock))
+        topology_logger.description_changed(event=topology_description_changed_event_mock)

     def test_description_changed_with_callback(self):
         topology_logger = TopologyListener(self.cluster_object,
@@ -42,12 +40,11 @@ def test_description_changed_with_callback(self):
         topology_description_changed_event_mock = MagicMock(spec=TopologyDescriptionChangedEvent)
         topology_description_changed_event_mock.new_description.has_writable_server.return_value = True

-        topology_logger.description_changed(event=cast(TopologyDescriptionChangedEvent,
-                                                       topology_description_changed_event_mock))
+        topology_logger.description_changed(event=topology_description_changed_event_mock)

         self._onReplicaSetReadyCallback.assert_called_once_with(self.cluster_object)

     def test_closed(self):
         topology_logger = TopologyListener(self.cluster_object,
                                            replica_set_ready_callback=self._onReplicaSetReadyCallback)

-        topology_logger.closed(event=cast(TopologyClosedEvent, Mock(spec=TopologyClosedEvent)))
+        topology_logger.closed(event=Mock(spec=TopologyClosedEvent))
diff --git a/tests/services/TestMongoService.py b/tests/services/TestMongoService.py
index 27e6602..3e08f11 100644
--- a/tests/services/TestMongoService.py
+++ b/tests/services/TestMongoService.py
@@ -5,13 +5,11 @@
 from base64 import b64encode

 from kubernetes.client import V1Secret, V1ObjectMeta
-from typing import Union
 from unittest import TestCase
-from unittest.mock import MagicMock, patch, Mock
+from unittest.mock import MagicMock, patch

 from mongoOperator.helpers.MongoResources import MongoResources
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
-from mongoOperator.services.KubernetesService import KubernetesService
 from mongoOperator.services.MongoService import MongoService
 from tests.test_utils import getExampleClusterDefinition
 from bson.json_util import loads

@@ -25,7 +23,7 @@ class TestMongoService(TestCase):
     def setUp(self):
         super().setUp()
-        self.kubernetes_service: Union[MagicMock, KubernetesService] = MagicMock()
+        self.kubernetes_service = MagicMock()
         self.dummy_credentials = b64encode(json.dumps({"user": "password"}).encode())
         self.kubernetes_service.getSecret.return_value = V1Secret(
             metadata=V1ObjectMeta(name="mongo-cluster-admin-credentials", namespace="default"),
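The `cast(...)` removals throughout this patch are behavior-preserving. A minimal standalone sketch (not part of the patch series; the stub class below is ours, for illustration only) of why the casts were dead weight: `typing.cast` returns its argument unchanged at runtime, and a `MagicMock` accepts any attribute access, so the cast only ever served the static type checker.

```python
# Sketch: typing.cast is a runtime no-op, and MagicMock duck-types any interface.
from typing import cast
from unittest.mock import MagicMock

class KubernetesService:  # illustrative stub, not the operator's real class
    def getSecret(self, name: str, namespace: str) -> dict: ...

service = MagicMock()
casted = cast(KubernetesService, service)  # informs the type checker only
assert casted is service                   # identical object at runtime

service.getSecret("mongo-cluster-admin-credentials", "default")
service.getSecret.assert_called_once_with("mongo-cluster-admin-credentials", "default")
```
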
From 86ab9ded86e99c19d84da7ead7ce36a83ac7e4c1 Mon Sep 17 00:00:00 2001
From: ChrisTerBeke
Date: Wed, 20 Feb 2019 09:15:12 +0100
Subject: [PATCH 33/36] Fix most review comments

---
 Dockerfile                                            | 10 ++++++----
 mongoOperator/ClusterManager.py                       |  4 +---
 mongoOperator/MongoOperator.py                        |  4 ++--
 mongoOperator/helpers/RestoreHelper.py                |  3 ++-
 .../helpers/listeners/mongo/HeartbeatListener.py      |  1 -
 mongoOperator/helpers/listeners/mongo/ServerLogger.py |  1 -
 .../helpers/listeners/mongo/TopologyListener.py       |  1 -
 .../helpers/resourceCheckers/BaseResourceChecker.py   |  2 --
 mongoOperator/services/KubernetesService.py           |  2 +-
 tests/TestMongoOperator.py                            |  4 ++--
 tests/helpers/TestClusterChecker.py                   |  6 +++---
 tests/helpers/TestRestoreHelper.py                    |  3 +--
 12 files changed, 18 insertions(+), 23 deletions(-)

diff --git a/Dockerfile b/Dockerfile
index 92f5d32..e796a4b 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -2,13 +2,15 @@ FROM python:3.6-stretch AS base

 WORKDIR /usr/src/app

-COPY requirements.txt ./

-# install MongoDB tools
+# Install MongoDB tools
 RUN apt-key adv --keyserver hkp://keyserver.ubuntu.com:80 --recv 2930ADAE8CAF5059EE73BB4B58712A2291FA4AD5 && \
     echo "deb http://repo.mongodb.org/apt/debian stretch/mongodb-org/3.6 main" | tee /etc/apt/sources.list.d/mongodb-org-3.6.list && \
     apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y mongodb-org-tools mongodb-org-shell && \
-    pip install --no-cache-dir -r requirements.txt
+    DEBIAN_FRONTEND=noninteractive apt-get install -y mongodb-org-tools mongodb-org-shell
+
+# Install Python dependencies
+COPY requirements.txt ./
+RUN pip install --no-cache-dir -r requirements.txt

 # This is the container build that will run the "unit tests"
 FROM base AS tests
diff --git a/mongoOperator/ClusterManager.py b/mongoOperator/ClusterManager.py
index d1dd941..0fc72fa 100644
--- a/mongoOperator/ClusterManager.py
+++ b/mongoOperator/ClusterManager.py
@@ -14,11 +14,9 @@
 from mongoOperator.services.MongoService import MongoService


-class ClusterChecker:
+class ClusterManager:
     """
     Manager that periodically checks the status of the MongoDB objects in the cluster.
     """
-    STREAM_REQUEST_TIMEOUT = (15.0, 5.0)  # connect, read timeout
-
     def __init__(self) -> None:
         self._cluster_versions: Dict[Tuple[str, str], str] = {}  # format: {(cluster_name, namespace): resource_version}
         self._kubernetes_service = KubernetesService()
diff --git a/mongoOperator/MongoOperator.py b/mongoOperator/MongoOperator.py
index 338c94a..58e8574 100644
--- a/mongoOperator/MongoOperator.py
+++ b/mongoOperator/MongoOperator.py
@@ -4,7 +4,7 @@
 import logging
 from time import sleep

-from mongoOperator.ClusterManager import ClusterChecker
+from mongoOperator.ClusterManager import ClusterManager


 class MongoOperator:
@@ -22,7 +22,7 @@ def run_forever(self) -> None:
         """
         Runs the mongo operator forever (until a kill command is received).
         """
-        checker = ClusterChecker()
+        checker = ClusterManager()
         try:
             while True:
                 logging.info("**** Running Cluster Check ****")
diff --git a/mongoOperator/helpers/RestoreHelper.py b/mongoOperator/helpers/RestoreHelper.py
index fe0655d..87b60ef 100644
--- a/mongoOperator/helpers/RestoreHelper.py
+++ b/mongoOperator/helpers/RestoreHelper.py
@@ -24,6 +24,7 @@ class RestoreHelper:
     """
     DEFAULT_BACKUP_PREFIX = "backups"
     BACKUP_FILE_FORMAT = "mongodb-backup-{namespace}-{name}-{date}.archive.gz"
+    LATEST_BACKUP_KEY = "latest"
     RESTORE_RETRIES = 4
     RESTORE_WAIT = 15.0

@@ -95,7 +96,7 @@ def restoreIfNeeded(self, cluster_object: V1MongoClusterConfiguration) -> bool:
             return False

         backup_file = cluster_object.spec.backups.gcs.restore_from
-        if backup_file == "latest":
+        if backup_file == self.LATEST_BACKUP_KEY:
             backup_file = self.getLastBackupStorageObjectName(cluster_object)

         logging.info("Attempting to restore file %s to cluster %s @ ns/%s.", backup_file,
diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
index 76bd038..f0acfb1 100644
--- a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
+++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
@@ -37,7 +37,6 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None:
         When the heartbeat arrived.
         :param event: The event.
         """
-        # The reply.document attribute was added in PyMongo 3.4.
         logging.debug("Heartbeat to server %s succeeded with reply %s", event.connection_id, event.reply.document)

         self._hosts[event.connection_id] = 1
diff --git a/mongoOperator/helpers/listeners/mongo/ServerLogger.py b/mongoOperator/helpers/listeners/mongo/ServerLogger.py
index 0e52a3d..d5aaea0 100644
--- a/mongoOperator/helpers/listeners/mongo/ServerLogger.py
+++ b/mongoOperator/helpers/listeners/mongo/ServerLogger.py
@@ -24,7 +24,6 @@ def description_changed(self, event: ServerDescriptionChangedEvent) -> None:
         previous_server_type = event.previous_description.server_type
         new_server_type = event.new_description.server_type
         if new_server_type != previous_server_type:
-            # server_type_name was added in PyMongo 3.4
             logging.debug("Server %s changed type from %s to %s", event.server_address,
                           event.previous_description.server_type_name,
                           event.new_description.server_type_name)
diff --git a/mongoOperator/helpers/listeners/mongo/TopologyListener.py b/mongoOperator/helpers/listeners/mongo/TopologyListener.py
index 4e23d15..f087c63 100644
--- a/mongoOperator/helpers/listeners/mongo/TopologyListener.py
+++ b/mongoOperator/helpers/listeners/mongo/TopologyListener.py
@@ -37,7 +37,6 @@ def description_changed(self, event: TopologyDescriptionChangedEvent) -> None:
         previous_topology_type = event.previous_description.topology_type
         new_topology_type = event.new_description.topology_type
         if new_topology_type != previous_topology_type:
-            # topology_type_name was added in PyMongo 3.4
             logging.debug("Topology %s changed type from %s to %s", event.topology_id,
                           event.previous_description.topology_type_name,
                           event.new_description.topology_type_name)
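The three listener classes touched above implement PyMongo's monitoring interfaces, which are registered on the client rather than called directly. A minimal sketch of that mechanism (a toy listener of our own, not the operator's classes), using only the public `pymongo.monitoring` API:

```python
# Sketch: a minimal PyMongo server listener; the client invokes its methods
# on server-discovery events once it is registered via event_listeners.
import logging
from pymongo import monitoring

class ToyServerLogger(monitoring.ServerListener):
    def opened(self, event):
        logging.debug("Server %s added to topology %s", event.server_address, event.topology_id)

    def description_changed(self, event):
        logging.debug("Server %s changed description", event.server_address)

    def closed(self, event):
        logging.debug("Server %s removed from topology %s", event.server_address, event.topology_id)

# Registration point (assumes a reachable mongod on localhost:27017):
# from pymongo import MongoClient
# client = MongoClient("mongodb://localhost:27017", event_listeners=[ToyServerLogger()])
```
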
diff --git a/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py b/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py
index 4bf706d..e2d8f0d 100644
--- a/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py
+++ b/mongoOperator/helpers/resourceCheckers/BaseResourceChecker.py
@@ -19,8 +19,6 @@ class BaseResourceChecker:
     Base class for services that can check Kubernetes resources.
     """

-    # this is the resource type, e.g. V1Service or V1StatefulSet.
-
     def __init__(self, kubernetes_service: KubernetesService):
         self.kubernetes_service = kubernetes_service
diff --git a/mongoOperator/services/KubernetesService.py b/mongoOperator/services/KubernetesService.py
index 0207c4e..3c159c1 100644
--- a/mongoOperator/services/KubernetesService.py
+++ b/mongoOperator/services/KubernetesService.py
@@ -102,7 +102,7 @@ def getMongoObject(self, name: str, namespace: str) -> V1MongoClusterConfigurati
                                                            Settings.CUSTOM_OBJECT_RESOURCE_PLURAL,
                                                            name)

-    def listAllServicesWithLabels(self, labels: Dict[str, str] = None) -> V1ServiceList:
+    def listAllServicesWithLabels(self, labels: Optional[Dict[str, str]] = None) -> V1ServiceList:
         """Get all services with the given labels."""
         label_selector = KubernetesResources.createLabelSelector(labels or self.DEFAULT_LABELS)
         logging.debug("Getting all services with labels %s", label_selector)
diff --git a/tests/TestMongoOperator.py b/tests/TestMongoOperator.py
index ebb1cda..cbcf6a9 100644
--- a/tests/TestMongoOperator.py
+++ b/tests/TestMongoOperator.py
@@ -12,7 +12,7 @@ class TestMongoOperator(TestCase):
     maxDiff = None

     @patch("mongoOperator.MongoOperator.sleep")
-    @patch("mongoOperator.MongoOperator.ClusterChecker")
+    @patch("mongoOperator.MongoOperator.ClusterManager")
     def test_run(self, checker_mock, sleep_mock):
         checker_mock.return_value.collectGarbage.side_effect = None, Exception()  # break the 3rd run

         operator = MongoOperator(sleep_per_run=0.01)
         with self.assertRaises(Exception):
@@ -29,7 +29,7 @@
         self.assertEqual([call(0.01)], sleep_mock.mock_calls)

     @patch("mongoOperator.MongoOperator.sleep")
-    @patch("mongoOperator.MongoOperator.ClusterChecker")
+    @patch("mongoOperator.MongoOperator.ClusterManager")
     def test_run_with_interrupt(self, checker_mock, sleep_mock):
         sleep_mock.side_effect = None, KeyboardInterrupt  # we force stop on the 2nd run
diff --git a/tests/helpers/TestClusterChecker.py b/tests/helpers/TestClusterChecker.py
index 91a5ac0..8131519 100644
--- a/tests/helpers/TestClusterChecker.py
+++ b/tests/helpers/TestClusterChecker.py
@@ -3,19 +3,19 @@
 # -*- coding: utf-8 -*-
 from unittest import TestCase
 from unittest.mock import patch, call
-from mongoOperator.ClusterManager import ClusterChecker
+from mongoOperator.ClusterManager import ClusterManager
 from mongoOperator.models.V1MongoClusterConfiguration import V1MongoClusterConfiguration
 from tests.test_utils import getExampleClusterDefinition
 from bson.json_util import loads


-class TestClusterChecker(TestCase):
+class TestClusterManager(TestCase):
     maxDiff = None

     def setUp(self):
         super().setUp()
         with patch("mongoOperator.ClusterManager.KubernetesService") as ks:
-            self.checker = ClusterChecker()
+            self.checker = ClusterManager()
         self.kubernetes_service = ks.return_value

         self.cluster_dict = getExampleClusterDefinition()
         self.cluster_dict["metadata"]["resourceVersion"] = "100"
diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py
index 1be1a01..1266dad 100644
--- a/tests/helpers/TestRestoreHelper.py
+++ b/tests/helpers/TestRestoreHelper.py
@@ -65,8 +65,7 @@ def test_restoreIfNeeded(self, restore_mock, gcs_service_mock, storage_mock):
         self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

         self.restore_helper.restoreIfNeeded(self.cluster_object)
-        print(restore_mock.mock_calls)
-        assert not restore_mock.called, "restore_mock should not have been called"
+        self.assertFalse(restore_mock.called, "restore_mock should not have been called")

     @patch("mongoOperator.helpers.RestoreHelper.os")
     @patch("mongoOperator.helpers.RestoreHelper.StorageClient")
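A note on the `assert not restore_mock.called` to `self.assertFalse(...)` change at the end of this patch (the rationale is our reading; the commit does not state one): bare `assert` statements are stripped entirely when Python runs with `-O`, and they raise a bare `AssertionError`, while `unittest` assertion methods always execute and attach the message to a proper test failure. A small sketch:

```python
# Sketch: prefer unittest assertions over bare asserts in test code.
from unittest import TestCase
from unittest.mock import MagicMock

class ExampleTest(TestCase):
    def test_mock_not_called(self):
        restore_mock = MagicMock()
        # A bare statement like `assert not restore_mock.called` is removed
        # under `python -O`; this form always runs and reports the message.
        self.assertFalse(restore_mock.called, "restore_mock should not have been called")
```
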
From 47a15dfda3a910add5c37cc155019a66afa84473 Mon Sep 17 00:00:00 2001
From: Rick van den Hof
Date: Wed, 20 Feb 2019 12:23:22 +0100
Subject: [PATCH 34/36] Process comments

---
 mongoOperator/helpers/MongoResources.py       |  4 ++--
 .../listeners/mongo/HeartbeatListener.py      | 15 ++++++++-----
 tests/TestMongoOperator.py                    |  2 +-
 tests/helpers/TestHeartBeatListener.py        |  2 +-
 tests/helpers/TestRestoreHelper.py            | 22 ++++++++++++++++---
 5 files changed, 32 insertions(+), 13 deletions(-)

diff --git a/mongoOperator/helpers/MongoResources.py b/mongoOperator/helpers/MongoResources.py
index 50e9d3b..c9b172b 100644
--- a/mongoOperator/helpers/MongoResources.py
+++ b/mongoOperator/helpers/MongoResources.py
@@ -21,8 +21,8 @@ def getMemberHostname(cls, pod_index, cluster_name, namespace) -> str:
         :param namespace: The namespace of the cluster.
         :return: The name of the host.
         """
-        return "{cluster_name}-{}.{cluster_name}.{}.svc.cluster.local".format(pod_index, namespace,
-                                                                              cluster_name=cluster_name)
+        return "{cluster_name}-{pod_index}.{cluster_name}.{namespace}.svc.cluster.local".format(
+            pod_index=pod_index, namespace=namespace, cluster_name=cluster_name)

     @classmethod
     def getMemberHostnames(cls, cluster_object: V1MongoClusterConfiguration) -> List[str]:
diff --git a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
index f0acfb1..476a910 100644
--- a/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
+++ b/mongoOperator/helpers/listeners/mongo/HeartbeatListener.py
@@ -20,7 +20,7 @@ def __init__(self, cluster_object: V1MongoClusterConfiguration,
         super().__init__()
         self._cluster_object: V1MongoClusterConfiguration = cluster_object
         self._expected_host_count: int = cluster_object.spec.mongodb.replicas
-        self._hosts: Dict[str, int] = {}
+        self._hosts_status: Dict[str, int] = {}
         self._all_hosts_ready_callback: Callable[[V1MongoClusterConfiguration], None] = all_hosts_ready_callback
         self._callback_executed = False

@@ -30,7 +30,8 @@ def started(self, event: ServerHeartbeatStartedEvent) -> None:
         :param event: The event.
         """
         logging.debug("Heartbeat sent to server %s", event.connection_id)
-        self._hosts[event.connection_id] = 0
+        # A value of 0 indicates we know about the server, but not if it's alive
+        self._hosts_status[event.connection_id] = 0

     def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None:
         """
@@ -38,14 +39,15 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None:
         :param event: The event.
         """
         logging.debug("Heartbeat to server %s succeeded with reply %s", event.connection_id, event.reply.document)
-        self._hosts[event.connection_id] = 1
+        # A value of 1 indicates pymongo was able to connect successfully
+        self._hosts_status[event.connection_id] = 1

         if self._callback_executed:
             # The callback was already executed so we don't have to again.
             logging.debug("The callback was already executed")
             return

-        host_count_found = len(list(filter(lambda x: self._hosts[x] == 1, self._hosts)))
+        host_count_found = len(list(filter(lambda x: self._hosts_status[x] == 1, self._hosts_status)))
         if self._expected_host_count != host_count_found:
             # The amount of returned hosts was different than expected.
             logging.debug("The host count did not match the expected host count: %s found, %s expected",
@@ -53,7 +55,7 @@ def succeeded(self, event: ServerHeartbeatSucceededEvent) -> None:
             return

         # Only execute the callback on the first host
-        if list(self._hosts.keys())[0] == event.connection_id:
+        if list(self._hosts_status.keys())[0] == event.connection_id:
             self._all_hosts_ready_callback(self._cluster_object)
             self._callback_executed = True

@@ -64,4 +66,5 @@ def failed(self, event: ServerHeartbeatFailedEvent) -> None:
         """
         logging.warning("Heartbeat to server %s failed with error %s", event.connection_id, event.reply)
-        self._hosts[event.connection_id] = -1
+        # A value of -1 indicates pymongo was unable to connect
+        self._hosts_status[event.connection_id] = -1
diff --git a/tests/TestMongoOperator.py b/tests/TestMongoOperator.py
index cbcf6a9..af97370 100644
--- a/tests/TestMongoOperator.py
+++ b/tests/TestMongoOperator.py
@@ -14,7 +14,7 @@ class TestMongoOperator(TestCase):
     @patch("mongoOperator.MongoOperator.sleep")
     @patch("mongoOperator.MongoOperator.ClusterManager")
     def test_run(self, checker_mock, sleep_mock):
-        checker_mock.return_value.collectGarbage.side_effect = None, Exception()  # break the 3rd run
+        checker_mock.return_value.collectGarbage.side_effect = None, Exception()  # break the 2nd run

         operator = MongoOperator(sleep_per_run=0.01)
         with self.assertRaises(Exception):
diff --git a/tests/helpers/TestHeartBeatListener.py b/tests/helpers/TestHeartBeatListener.py
index d938670..d60e19a 100644
--- a/tests/helpers/TestHeartBeatListener.py
+++ b/tests/helpers/TestHeartBeatListener.py
@@ -50,7 +50,7 @@ def test_succeeded_invalid_replicaSet(self):
                                              all_hosts_ready_callback=self._onAllHostsReadyCallback)

         # Fake two already successful hosts
-        heartbeat_logger._hosts = {"foo": 1, "bar": 1}
+        heartbeat_logger._hosts_status = {"foo": 1, "bar": 1}

         # Call it with invalid replicaSet configuration
         heartbeat_event_mock = MagicMock(spec=ServerHeartbeatSucceededEvent)
diff --git a/tests/helpers/TestRestoreHelper.py b/tests/helpers/TestRestoreHelper.py
index 1266dad..c805454 100644
--- a/tests/helpers/TestRestoreHelper.py
+++ b/tests/helpers/TestRestoreHelper.py
@@ -61,7 +61,6 @@ def test_restoreIfNeeded(self, restore_mock, gcs_service_mock, storage_mock):
         restore_mock.reset_mock()

         self.cluster_dict = getExampleClusterDefinition()
-        print(repr(self.cluster_dict))
         self.cluster_object = V1MongoClusterConfiguration(**self.cluster_dict)

         self.restore_helper.restoreIfNeeded(self.cluster_object)
@@ -112,7 +111,16 @@ def test_restore_mongo_error(self, subprocess_mock, gcs_service_mock, storage_mo
         self.assertEqual("Could not restore '" + expected_backup_name + "' after 4 retries!", str(context.exception))
         self.assertEqual(4, subprocess_mock.call_count)

-        # TODO: assert calls on unused mocks
+        self.assertEqual([call.from_service_account_info({"user": "password"})], gcs_service_mock.mock_calls)
+        expected_storage_calls = [
+            call(gcs_service_mock.from_service_account_info.return_value.project_id,
+                 gcs_service_mock.from_service_account_info.return_value),
+            call().get_bucket("ultimaker-mongo-backups"),
+            call().get_bucket().blob("test-backups/" + expected_backup_name),
+            call().get_bucket().blob().download_to_filename("/tmp/" + expected_backup_name),
+        ]
+        self.assertEqual(expected_storage_calls, storage_mock.mock_calls)
+        self.assertFalse(os_mock.called, "os_mock should not have been called")

     @patch("mongoOperator.helpers.RestoreHelper.os")
     @patch("mongoOperator.helpers.RestoreHelper.StorageClient")
@@ -126,7 +134,15 @@ def test_restore_os_error(self, subprocess_mock, gcs_service_mock, storage_mock,
         os_mock.remove.assert_called_with("/tmp/" + expected_backup_name)
         self.assertEqual(1, subprocess_mock.call_count)

-        # TODO: assert calls on unused mocks
+        self.assertEqual([call.from_service_account_info({"user": "password"})], gcs_service_mock.mock_calls)
+        expected_storage_calls = [
+            call(gcs_service_mock.from_service_account_info.return_value.project_id,
+                 gcs_service_mock.from_service_account_info.return_value),
+            call().get_bucket("ultimaker-mongo-backups"),
+            call().get_bucket().blob("test-backups/" + expected_backup_name),
+            call().get_bucket().blob().download_to_filename("/tmp/" + expected_backup_name),
+        ]
+        self.assertEqual(expected_storage_calls, storage_mock.mock_calls)

     @patch("mongoOperator.helpers.RestoreHelper.check_output")
     def test_restore_gcs_bad_credentials(self, subprocess_mock):
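The TODOs replaced above follow a pattern worth spelling out: `mock_calls` records every interaction with a mock, including chained calls, in order, so comparing it against a literal list of `call` objects pins down the mock's entire usage. A self-contained sketch (the project id, bucket, and blob names here are ours, for illustration):

```python
# Sketch: asserting a full chained-call history against mock_calls.
from unittest.mock import MagicMock, call

storage_mock = MagicMock()

# Exercise the mock roughly the way a GCS storage client gets used.
client = storage_mock("my-project")
client.get_bucket("ultimaker-mongo-backups").blob("test-backups/example.archive.gz")

expected_calls = [
    call("my-project"),
    call().get_bucket("ultimaker-mongo-backups"),
    call().get_bucket().blob("test-backups/example.archive.gz"),
]
assert storage_mock.mock_calls == expected_calls
```
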
From 53eb2e24f2ef0d3a35de87eb915a9cfb0a6012bf Mon Sep 17 00:00:00 2001
From: Rick van den Hof
Date: Wed, 20 Feb 2019 14:47:44 +0100
Subject: [PATCH 35/36] Rename google_credentials.json to google-credentials.json

---
 .gitignore                | 2 +-
 README.md                 | 2 +-
 build-and-deploy-local.sh | 6 +++---
 3 files changed, 5 insertions(+), 5 deletions(-)

diff --git a/.gitignore b/.gitignore
index be91d96..56b9d48 100644
--- a/.gitignore
+++ b/.gitignore
@@ -106,4 +106,4 @@ ENV/
 .idea

 # Google credentials
-google_credentials.json
+google-credentials.json
diff --git a/README.md b/README.md
index 2fa95cd..57f4efc 100644
--- a/README.md
+++ b/README.md
@@ -96,7 +96,7 @@ minikube start

 Then you can run our test script to deploy the operator and execute some end-to-end tests.

-Note that this script assumes there is a file `google_credentials.json` in this directory that will be uploaded to Kubernetes as the secret for the backups.
+Note that this script assumes there is a file `google-credentials.json` in this directory that will be uploaded to Kubernetes as the secret for the backups.
 You will need to download this file from Google in order to run the script.

 ```bash
diff --git a/build-and-deploy-local.sh b/build-and-deploy-local.sh
index 44109a8..d15f488 100755
--- a/build-and-deploy-local.sh
+++ b/build-and-deploy-local.sh
@@ -3,8 +3,8 @@ set -eo pipefail

 EXAMPLE_FILE=${1:-examples/mongo-3-replicas.yaml}

-if ! [ -e "google_credentials.json" ]; then
-    echo "google_credentials.json file is missing, aborting."
+if ! [[ -e "google-credentials.json" ]]; then
+    echo "google-credentials.json file is missing, aborting."
     exit -1
 fi

@@ -38,7 +38,7 @@ ${KUBECTL} describe deploy mongo-operator
 if ${KUBECTL} get secret storage-serviceaccount 1>/dev/null; then
     ${KUBECTL} delete secret storage-serviceaccount
 fi
-${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google_credentials.json
+${KUBECTL} create secret generic storage-serviceaccount --from-file=json=google-credentials.json

 # wait for the pod to startup to retrieve its name
 sleep 10
From b4a22e8804e9855f221a249065011254042a61ee Mon Sep 17 00:00:00 2001
From: Rick van den Hof
Date: Fri, 22 Feb 2019 10:07:55 +0100
Subject: [PATCH 36/36] Add assertions and remove unneeded test

---
 tests/helpers/TestHeartBeatListener.py | 18 ++++++------------
 1 file changed, 6 insertions(+), 12 deletions(-)

diff --git a/tests/helpers/TestHeartBeatListener.py b/tests/helpers/TestHeartBeatListener.py
index d60e19a..7f693db 100644
--- a/tests/helpers/TestHeartBeatListener.py
+++ b/tests/helpers/TestHeartBeatListener.py
@@ -32,11 +32,15 @@ def test_succeeded(self):
         heartbeat_event_mock.connection_id = "host-1", "27017"

         heartbeat_logger.succeeded(event=heartbeat_event_mock)
+        self.assertFalse(self._onAllHostsReadyCallback.called)

         heartbeat_event_mock.connection_id = "host-2", "27017"
         heartbeat_logger.succeeded(event=heartbeat_event_mock)
+        self.assertFalse(self._onAllHostsReadyCallback.called)

         heartbeat_event_mock.connection_id = "host-3", "27017"
         heartbeat_logger.succeeded(event=heartbeat_event_mock)
+        self.assertFalse(self._onAllHostsReadyCallback.called)

         heartbeat_event_mock.reply.document = {"info": ""}
         heartbeat_event_mock.connection_id = "host-1", "27017"
@@ -45,26 +49,16 @@ def test_succeeded(self):

         self._onAllHostsReadyCallback.assert_called_once_with(self.cluster_object)

-    def test_succeeded_invalid_replicaSet(self):
-        heartbeat_logger = HeartbeatListener(self.cluster_object,
-                                             all_hosts_ready_callback=self._onAllHostsReadyCallback)
-
-        # Fake two already successful hosts
-        heartbeat_logger._hosts_status = {"foo": 1, "bar": 1}
-
-        # Call it with invalid replicaSet configuration
-        heartbeat_event_mock = MagicMock(spec=ServerHeartbeatSucceededEvent)
-        heartbeat_event_mock.reply.document = {"info": "Does not have a valid replica set config"}
-        heartbeat_logger.succeeded(event=heartbeat_event_mock)
-
     def test_succeeded_already_called(self):
         heartbeat_logger = HeartbeatListener(self.cluster_object,
                                              all_hosts_ready_callback=self._onAllHostsReadyCallback)

         heartbeat_logger._callback_executed = True
         heartbeat_logger.succeeded(event=MagicMock())
+        self.assertFalse(self._onAllHostsReadyCallback.called)

     def test_failed(self):
         heartbeat_logger = HeartbeatListener(self.cluster_object,
                                              all_hosts_ready_callback=self._onAllHostsReadyCallback)

         heartbeat_logger.failed(event=Mock(spec=ServerHeartbeatFailedEvent))
+        self.assertFalse(self._onAllHostsReadyCallback.called)
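To make explicit the behavior these new assertions pin down: the listener's ready callback must stay silent until every expected host has reported a successful heartbeat, and must never fire twice. A condensed re-implementation sketch (ours, stripped of the PyMongo event types) that the assertions above would hold for:

```python
# Sketch: the callback-gating logic the TestHeartBeatListener assertions verify.
from typing import Callable, Dict, Tuple

class HeartbeatGate:
    def __init__(self, expected_host_count: int, on_all_ready: Callable[[], None]) -> None:
        self._expected_host_count = expected_host_count
        self._on_all_ready = on_all_ready
        self._hosts_status: Dict[Tuple[str, str], int] = {}  # 1 = heartbeat succeeded
        self._callback_executed = False

    def succeeded(self, connection_id: Tuple[str, str]) -> None:
        self._hosts_status[connection_id] = 1
        if self._callback_executed:
            return  # never fire the callback twice
        ready = sum(1 for status in self._hosts_status.values() if status == 1)
        if ready == self._expected_host_count:
            self._on_all_ready()
            self._callback_executed = True

calls = []
gate = HeartbeatGate(3, lambda: calls.append("ready"))
gate.succeeded(("host-1", "27017"))
gate.succeeded(("host-2", "27017"))
assert calls == []                 # callback must not fire early
gate.succeeded(("host-3", "27017"))
gate.succeeded(("host-1", "27017"))
assert calls == ["ready"]          # fires exactly once
```
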