From 3f6a5248c7b15afd65756f4f1431389c83800028 Mon Sep 17 00:00:00 2001 From: "renovate[bot]" <29139614+renovate[bot]@users.noreply.github.com> Date: Wed, 22 Nov 2023 09:20:11 +0000 Subject: [PATCH 01/39] feat(Dependencies): Update dependency ansible to v7 [SECURITY] | datasource | package | from | to | | ---------- | ------- | ----- | ----- | | pypi | ansible | 5.1.0 | 7.0.0 | --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index eeb19a8..e18b0f7 100644 --- a/requirements.txt +++ b/requirements.txt @@ -4,7 +4,7 @@ python-keystoneclient==5.2.0 openstacksdk==1.5.0 deprecated==1.2.14 Click==8.1.7 -ansible==5.1.0 +ansible==7.0.0 flake8==6.1.0 paramiko==2.12.0 ruamel.yaml==0.18.5 From f01070e3f06a98e8a122059b30b83314b5cb0f83 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 10:52:12 +0100 Subject: [PATCH 02/39] started tests --- simple_vm_client/test_openstack_connector.py | 102 +++++++++++++++++++ 1 file changed, 102 insertions(+) create mode 100644 simple_vm_client/test_openstack_connector.py diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py new file mode 100644 index 0000000..2eb9cf8 --- /dev/null +++ b/simple_vm_client/test_openstack_connector.py @@ -0,0 +1,102 @@ +import unittest +from unittest.mock import MagicMock, patch +from openstack_connector.openstack_connector import OpenStackConnector +from openstack.test import fakes +from openstack.compute.v2 import limits +from openstack.compute.v2.image import Image +from openstack.block_storage.v3.limits import Limit +from openstack.image.v2 import image as image_module +from ttypes import ImageNotFoundException + +EXPECTED_IMAGE = image_module.Image(id='image_id_2', status='active', name="image_2", + metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, tags=["portalclient"]) +INACTIVE_IMAGE = image_module.Image(id='image_inactive', status='building', 
name="image_inactive", + metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, tags=["portalclient"]) + +IMAGES = [ + image_module.Image(id='image_id_1', status='inactive', name="image_1", metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, + tags=["portalclient"]), + EXPECTED_IMAGE, + image_module.Image(id='image_id_3', status='active', name="image_3", metadata={'os_version': '22.04', 'os_distro': 'centos'}, + tags=["portalclient"]), + INACTIVE_IMAGE +] + + +class TestOpenStackConnector(unittest.TestCase): + + def setUp(self): + # Create an instance of YourClass with a mocked openstack_connection + self.mock_openstack_connection = MagicMock() + with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): + self.openstack_connector = OpenStackConnector(None, None) + self.openstack_connector.openstack_connection = self.mock_openstack_connection + + def test_get_image(self): + self.mock_openstack_connection.get_image.return_value = EXPECTED_IMAGE + result = self.openstack_connector.get_image(EXPECTED_IMAGE.id) + self.assertEqual(result, EXPECTED_IMAGE) + + def test_get_image_not_active_exception(self): + + # Configure the mock_openstack_connection.get_image to return the not active image + self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE + print(f"Name: {INACTIVE_IMAGE.name}") + # Configure the ImageNotFoundException to be raised + with self.assertRaises(ImageNotFoundException) as context: + # Call the method with the not active image ID and set ignore_not_active to False + self.openstack_connector.get_image(name_or_id=INACTIVE_IMAGE.name, ignore_not_active=False) + + # Assert that the exception contains the expected message and image ID + self.assertEqual( + context.exception.message, + f"Image {INACTIVE_IMAGE.name} found but not active!" 
+ ) + + def test_get_images(self): + # Configure the mock_openstack_connection.image.images to return the fake images + self.mock_openstack_connection.image.images.return_value = IMAGES + + # Call the method + result = self.openstack_connector.get_images() + + # Assert that the method returns the expected result + self.assertEqual(result, IMAGES) + + def test_get_active_image_by_os_version(self): + # Generate a set of fake images with different properties + + # Configure the mock_openstack_connection.list_images to return the fake images + self.mock_openstack_connection.list_images.return_value = IMAGES + + # Call the method with specific os_version and os_distro + result = self.openstack_connector.get_active_image_by_os_version(os_version='22.04', os_distro='ubuntu') + + # Assert that the method returns the expected image + self.assertEqual(result, EXPECTED_IMAGE) + + def test_replace_inactive_image(self): + # Generate a fake image with status 'something other than active' + + self.mock_openstack_connection.list_images.return_value = IMAGES + + # Configure the mock_openstack_connection.get_image to return the inactive image + self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE + + # Call the method with the inactive image ID and set replace_inactive to True + result = self.openstack_connector.get_image('inactive_id', replace_inactive=True) + + # Assert that the method returns the replacement image + self.assertEqual(result, EXPECTED_IMAGE) + + @unittest.skip("Currently not working") + def test_get_limits(self): + compute_limits = fakes.generate_fake_resource(limits.AbsoluteLimits) + volume_limits = fakes.generate_fake_resource(Limit) + self.mock_openstack_connection.get_compute_limits.return_value = compute_limits + self.mock_openstack_connection.get_volume_limits.return_value = volume_limits + result = self.openstack_connector.get_limits() + + +if __name__ == "__main__": + unittest.main() From 5c96f29b068fcdc33d44132af585be169c5ec0f8 Mon Sep 17 
00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 14:13:26 +0100 Subject: [PATCH 03/39] more tests --- simple_vm_client/test_openstack_connector.py | 479 ++++++++++++++++++- 1 file changed, 474 insertions(+), 5 deletions(-) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 2eb9cf8..c088c93 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -1,13 +1,24 @@ +import inspect +import os +import tempfile import unittest -from unittest.mock import MagicMock, patch +from unittest import mock +from unittest.mock import MagicMock, patch, call + +from openstack.block_storage.v3.volume import Volume +from openstack.cloud import OpenStackCloudException +from openstack.exceptions import ResourceNotFound, ConflictException, ResourceFailure + from openstack_connector.openstack_connector import OpenStackConnector from openstack.test import fakes -from openstack.compute.v2 import limits +from openstack.compute.v2 import limits, server from openstack.compute.v2.image import Image from openstack.block_storage.v3.limits import Limit from openstack.image.v2 import image as image_module -from ttypes import ImageNotFoundException - +from ttypes import ImageNotFoundException, VolumeNotFoundException, DefaultException, SnapshotNotFoundException, \ + ResourceNotAvailableException +from openstack.compute import compute_service +from openstack.compute.v2.server import Server EXPECTED_IMAGE = image_module.Image(id='image_id_2', status='active', name="image_2", metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, tags=["portalclient"]) INACTIVE_IMAGE = image_module.Image(id='image_inactive', status='building', name="image_inactive", @@ -21,24 +32,217 @@ tags=["portalclient"]), INACTIVE_IMAGE ] +DEFAULT_SECURITY_GROUPS =["defaultSimpleVM"] class TestOpenStackConnector(unittest.TestCase): + @classmethod + def setUpClass(cls): + cls.env_patcher = 
mock.patch.dict(os.environ, { + "OS_AUTH_URL": "https://example.com", + "OS_USERNAME": "username", + "OS_PASSWORD": "password", + "OS_PROJECT_NAME": "project_name", + "OS_PROJECT_ID": "project_id", + "OS_USER_DOMAIN_NAME": "user_domain", + "OS_PROJECT_DOMAIN_ID": "project_domain_id", + "USE_APPLICATION_CREDENTIALS": "False"} + ) + cls.env_patcher.start() + + super().setUpClass() + + @classmethod + def tearDownClass(cls): + super().tearDownClass() + + cls.env_patcher.stop() def setUp(self): # Create an instance of YourClass with a mocked openstack_connection self.mock_openstack_connection = MagicMock() with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): self.openstack_connector = OpenStackConnector(None, None) self.openstack_connector.openstack_connection = self.mock_openstack_connection + self.openstack_connector.DEFAULT_SECURITY_GROUPS=DEFAULT_SECURITY_GROUPS + + def init_openstack_connector(self): + with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): + openstack_connector = OpenStackConnector(None, None) + openstack_connector.openstack_connection = self.mock_openstack_connection + openstack_connector.DEFAULT_SECURITY_GROUPS = DEFAULT_SECURITY_GROUPS + return openstack_connector + def test_load_config_yml(self): + # Create a temporary YAML file with sample configuration + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: + temp_file.write(""" + openstack: + gateway_ip: "192.168.1.1" + network: "my_network" + sub_network: "my_sub_network" + cloud_site: "my_cloud_site" + ssh_port_calculation: 22 + udp_port_calculation: 12345 + gateway_security_group_id: "security_group_id" + production: true + forc: + forc_security_group_id: "forc_security_group_id" + """) + + # Call the load_config_yml method with the temporary file path + self.openstack_connector.load_config_yml(temp_file.name) + + # Assert that the configuration attributes are set correctly + self.assertEqual(self.openstack_connector.GATEWAY_IP, 
"192.168.1.1") + self.assertEqual(self.openstack_connector.NETWORK, "my_network") + self.assertEqual(self.openstack_connector.SUB_NETWORK, "my_sub_network") + self.assertTrue(self.openstack_connector.PRODUCTION) + self.assertEqual(self.openstack_connector.CLOUD_SITE, "my_cloud_site") + self.assertEqual(self.openstack_connector.SSH_PORT_CALCULATION, 22) + self.assertEqual(self.openstack_connector.UDP_PORT_CALCULATION, 12345) + self.assertEqual(self.openstack_connector.FORC_SECURITY_GROUP_ID, "forc_security_group_id") + + @patch('openstack_connector.openstack_connector.logger') + def test_load_env_config_username_password(self, mock_logger): + openstack_connector = self.init_openstack_connector() + + # Call the load_env_config method + openstack_connector.load_env_config() + + # Assert that attributes are set correctly + self.assertEqual(openstack_connector.AUTH_URL, "https://example.com") + self.assertFalse(openstack_connector.USE_APPLICATION_CREDENTIALS) + self.assertEqual(openstack_connector.USERNAME, "username") + self.assertEqual(openstack_connector.PASSWORD, "password") + self.assertEqual(openstack_connector.PROJECT_NAME, "project_name") + self.assertEqual(openstack_connector.PROJECT_ID, "project_id") + self.assertEqual(openstack_connector.USER_DOMAIN_NAME, "user_domain") + self.assertEqual(openstack_connector.PROJECT_DOMAIN_ID, "project_domain_id") + + # Assert that logger.info was called with the expected message + mock_logger.info.assert_called_once_with("Load environment config: OpenStack") + + @patch('openstack_connector.openstack_connector.logger') + @patch.dict(os.environ, { + "OS_AUTH_URL": "https://example.com", + "USE_APPLICATION_CREDENTIALS": "True", + "OS_APPLICATION_CREDENTIAL_ID": "app_cred_id", + "OS_APPLICATION_CREDENTIAL_SECRET": "app_cred_secret" + }) + def test_load_env_config_application_credentials(self, mock_logger): + # Create an instance of OpenStackConnector + openstack_connector = self.init_openstack_connector() + + # Call the 
load_env_config method + openstack_connector.load_env_config() + + # Assert that attributes are set correctly for application credentials + self.assertEqual(openstack_connector.AUTH_URL, "https://example.com") + self.assertTrue(openstack_connector.USE_APPLICATION_CREDENTIALS) + self.assertEqual(openstack_connector.APPLICATION_CREDENTIAL_ID, "app_cred_id") + self.assertEqual(openstack_connector.APPLICATION_CREDENTIAL_SECRET, "app_cred_secret") + + # Assert that logger.info was called with the expected messages + expected_calls = [ + call("Load environment config: OpenStack"), + call("APPLICATION CREDENTIALS will be used!") + ] + mock_logger.info.assert_has_calls(expected_calls, any_order=False) + @patch('openstack_connector.openstack_connector.logger') + @patch.dict(os.environ, { + "OS_AUTH_URL": ""}) + def test_load_env_config_missing_os_auth_url(self, mock_logger): + openstack_connector = self.init_openstack_connector() + + # Mock sys.exit to capture the exit status + with patch('sys.exit') as mock_exit: + # Call the load_env_config method + openstack_connector.load_env_config() + # Assert that logger.error was called with the expected message + mock_logger.error.assert_called_once_with("OS_AUTH_URL not provided in env!") + + # Assert that sys.exit was called with status code 1 + mock_exit.assert_called_once_with(1) + @patch('openstack_connector.openstack_connector.logger') + @patch.dict(os.environ, {"USE_APPLICATION_CREDENTIALS": "True"}) + def test_load_env_config_missing_app_cred_vars(self, mock_logger): + # Create an instance of OpenStackConnector + openstack_connector = self.init_openstack_connector() + + # Mock sys.exit to capture the exit status + with patch('sys.exit') as mock_exit: + # Call the load_env_config method + openstack_connector.load_env_config() + + # Assert that logger.error was called with the expected message + expected_error_message = "Usage of Application Credentials enabled - but OS_APPLICATION_CREDENTIAL_ID not provided in env!" 
+ mock_logger.error.assert_called_once_with(expected_error_message) + + # Assert that sys.exit was called with status code 1 + mock_exit.assert_called_once_with(1) + + @patch('openstack_connector.openstack_connector.logger') + @patch.dict(os.environ, { + "USE_APPLICATION_CREDENTIALS": "False", + "OS_USERNAME": "test_username", + "OS_PASSWORD": "test_password", + "OS_PROJECT_NAME": "test_project_name", + "OS_PROJECT_ID": "test_project_id", + "OS_USER_DOMAIN_NAME": "test_user_domain", + "OS_PROJECT_DOMAIN_ID": "test_project_domain" + }) + def test_load_env_config_missing_username_password_vars(self, mock_logger): + # Create an instance of OpenStackConnector using the helper method + openstack_connector = self.init_openstack_connector() + + # Remove required environment variables + del os.environ["OS_USERNAME"] + del os.environ["OS_PASSWORD"] + + # Mock sys.exit to capture the exit status + with patch('sys.exit') as mock_exit: + # Call the load_env_config method + openstack_connector.load_env_config() + + # Assert that logger.error was called with the expected message + expected_error_message = "Usage of Username/Password enabled - but keys OS_USERNAME, OS_PASSWORD not provided in env!" 
+ mock_logger.error.assert_called_once_with(expected_error_message) + + # Assert that sys.exit was called with status code 1 + mock_exit.assert_called_once_with(1) + def test_get_default_security_groups(self): + # Call the _get_default_security_groups method + default_security_groups = self.openstack_connector._get_default_security_groups() + + # Assert that the returned list is a copy of the DEFAULT_SECURITY_GROUPS attribute + self.assertEqual(default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS) + + # Assert that modifying the returned list does not affect the DEFAULT_SECURITY_GROUPS attribute + default_security_groups.append("new_security_group") + self.assertNotEqual(default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS) def test_get_image(self): self.mock_openstack_connection.get_image.return_value = EXPECTED_IMAGE result = self.openstack_connector.get_image(EXPECTED_IMAGE.id) self.assertEqual(result, EXPECTED_IMAGE) - def test_get_image_not_active_exception(self): + def test_get_image_not_found_exception(self): + # Configure the mock_openstack_connection.get_image to return None + self.mock_openstack_connection.get_image.return_value = None + + # Configure the ImageNotFoundException to be raised + with self.assertRaises(ImageNotFoundException) as context: + # Call the method with an image ID that will not be found + self.openstack_connector.get_image('nonexistent_id', ignore_not_found=False) + + # Assert that the exception contains the expected message and image ID + self.assertEqual( + context.exception.message, + "Image nonexistent_id not found!" 
+ ) + + def test_get_image_not_active_exception(self): # Configure the mock_openstack_connection.get_image to return the not active image self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE print(f"Name: {INACTIVE_IMAGE.name}") @@ -75,6 +279,21 @@ def test_get_active_image_by_os_version(self): # Assert that the method returns the expected image self.assertEqual(result, EXPECTED_IMAGE) + def test_get_active_image_by_os_version_not_found_exception(self): + # Configure the mock_openstack_connection.list_images to return an empty list + self.mock_openstack_connection.list_images.return_value = [] + + # Configure the ImageNotFoundException to be raised + with self.assertRaises(ImageNotFoundException) as context: + # Call the method with an os_version and os_distro that won't find a matching image + self.openstack_connector.get_active_image_by_os_version('nonexistent_version', 'nonexistent_distro') + + # Assert that the exception contains the expected message + self.assertEqual( + context.exception.message, + "Old Image was deactivated! No image with os_version:nonexistent_version and os_distro:nonexistent_distro found!" 
+ ) + def test_replace_inactive_image(self): # Generate a fake image with status 'something other than active' @@ -97,6 +316,256 @@ def test_get_limits(self): self.mock_openstack_connection.get_volume_limits.return_value = volume_limits result = self.openstack_connector.get_limits() + @patch("openstack_connector.openstack_connector.logger.info") + @patch("openstack_connector.openstack_connector.logger.error") + def test_create_server(self, mock_logger_error, mock_logger_info): + # Prepare test data + name = "test_server" + image_id = "test_image_id" + flavor_id = "test_flavor_id" + network_id = "test_network_id" + userdata = "test_userdata" + key_name = "test_key" + metadata = {"key1": "value1", "key2": "value2"} + security_groups = ["group1", "group2"] + + # Mock the create_server method to return a fake server object + fake_server = Server(**{"id": "fake_server_id", "name": name}) + print(f"test : {fake_server}") + self.mock_openstack_connection.create_server.return_value = fake_server + + # Call the create_server method + result = self.openstack_connector.create_server( + name, image_id, flavor_id, network_id, userdata, key_name, + metadata, security_groups + ) + + # Check if the method logs the correct information + mock_logger_info.assert_called_once_with( + f"Create Server:\n\tname: {name}\n\timage_id:{image_id}\n\tflavor_id:{flavor_id}\n\tmetadata:{metadata}" + ) + + # Check if the create_server method on openstack_connection was called with the expected parameters + self.mock_openstack_connection.create_server.assert_called_once_with( + name=name, + image=image_id, + flavor=flavor_id, + network=[network_id], + userdata=userdata, + key_name=key_name, + meta=metadata, + security_groups=security_groups, + ) + + # Check if the method returns the fake server object + self.assertEqual(result, fake_server) + + @patch("openstack_connector.openstack_connector.logger.info") + @patch("openstack_connector.openstack_connector.logger.exception") + def 
test_get_volume(self, mock_logger_exception, mock_logger_info): + # Prepare test data + name_or_id = "test_volume_id" + + # Mock the get_volume method to return a fake volume object + fake_volume = Volume(**{"id": "fake_volume_id", "name": "test_volume"}) + self.mock_openstack_connection.get_volume.return_value = fake_volume + + # Call the get_volume method + result = self.openstack_connector.get_volume(name_or_id) + + # Check if the method logs the correct information + mock_logger_info.assert_called_once_with(f"Get Volume {name_or_id}") + + # Check if the get_volume method on openstack_connection was called with the expected parameters + self.mock_openstack_connection.get_volume.assert_called_once_with(name_or_id=name_or_id) + + # Check if the method returns the fake volume object + self.assertEqual(result, fake_volume) + + @patch("openstack_connector.openstack_connector.logger.exception") + def test_get_volume_exception(self, mock_logger_exception): + # Prepare test data + name_or_id = "non_existing_volume_id" + + # Mock the get_volume method to return None + self.mock_openstack_connection.get_volume.return_value = None + + # Call the get_volume method and expect a VolumeNotFoundException + with self.assertRaises(Exception): # Replace Exception with the actual exception type + self.openstack_connector.get_volume(name_or_id) + + # Check if the method logs the correct exception information + mock_logger_exception.assert_called_once_with(f"No Volume with id {name_or_id}") + + @patch("openstack_connector.openstack_connector.logger.info") + @patch("openstack_connector.openstack_connector.logger.exception") + def test_delete_volume(self, mock_logger_exception, mock_logger_info): + # Prepare test data + volume_id = "test_volume_id" + + # Mock the delete_volume method to avoid actual deletion in the test + self.mock_openstack_connection.delete_volume.side_effect = [ + None, # No exception case + ResourceNotFound(message="Volume not found"), # VolumeNotFoundException 
case + ConflictException(message="Delete volume failed"), # OpenStackCloudException case + OpenStackCloudException(message="Some other exception") # DefaultException case + ] + + # Call the delete_volume method for different scenarios + # 1. No exception + self.openstack_connector.delete_volume(volume_id) + mock_logger_info.assert_called_once_with(f"Delete Volume {volume_id}") + mock_logger_exception.assert_not_called() + + # 2. ResourceNotFound, expect VolumeNotFoundException + with self.assertRaises(VolumeNotFoundException): # Replace Exception with the actual exception type + self.openstack_connector.delete_volume(volume_id) + mock_logger_exception.assert_called_with(f"No Volume with id {volume_id}") + + # 3. ConflictException, expect OpenStackCloudException + with self.assertRaises(OpenStackCloudException): # Replace Exception with the actual exception type + self.openstack_connector.delete_volume(volume_id) + mock_logger_exception.assert_called_with(f"Delete volume: {volume_id}) failed!") + + # 4. 
OpenStackCloudException, expect DefaultException + with self.assertRaises(DefaultException): # Replace Exception with the actual exception type + self.openstack_connector.delete_volume(volume_id) + + @patch("openstack_connector.openstack_connector.logger.info") + @patch("openstack_connector.openstack_connector.logger.error") + def test_create_volume_snapshot(self, mock_logger_error, mock_logger_info): + # Prepare test data + volume_id = "test_volume_id" + snapshot_name = "test_snapshot" + snapshot_description = "Test snapshot description" + + # Mock the create_volume_snapshot method to avoid actual creation in the test + self.mock_openstack_connection.create_volume_snapshot.side_effect = [ + {"id": "snapshot_id"}, # No exception case + ResourceNotFound(message="Volume not found"), # VolumeNotFoundException case + OpenStackCloudException(message="Some other exception") # DefaultException case + ] + + # Call the create_volume_snapshot method for different scenarios + # 1. No exception + snapshot_id = self.openstack_connector.create_volume_snapshot( + volume_id, snapshot_name, snapshot_description + ) + self.assertEqual(snapshot_id, "snapshot_id") + mock_logger_info.assert_called_once_with(f"Create Snapshot for Volume {volume_id}") + + # 2. ResourceNotFound, expect VolumeNotFoundException + with self.assertRaises(VolumeNotFoundException): + self.openstack_connector.create_volume_snapshot( + volume_id, snapshot_name, snapshot_description + ) + mock_logger_error.assert_called_with(f"No Volume with id {volume_id}") + + # 3. 
OpenStackCloudException, expect DefaultException + with self.assertRaises(DefaultException): + self.openstack_connector.create_volume_snapshot( + volume_id, snapshot_name, snapshot_description + ) + + + @patch("openstack_connector.openstack_connector.logger.info") + @patch("openstack_connector.openstack_connector.logger.exception") + def test_get_volume_snapshot(self, mock_logger_exception, mock_logger_info): + # Prepare test data + snapshot_id = "test_snapshot_id" + + # Mock the get_volume_snapshot method to avoid actual retrieval in the test + self.mock_openstack_connection.get_volume_snapshot.side_effect = [ + {"id": snapshot_id}, # No exception case + None, # VolumeNotFoundException case + ] + + # Call the get_volume_snapshot method for different scenarios + # 1. No exception + snapshot = self.openstack_connector.get_volume_snapshot(snapshot_id) + self.assertEqual(snapshot["id"], snapshot_id) + mock_logger_info.assert_called_once_with(f"Get volume Snapshot {snapshot_id}") + + # 2. None returned, expect VolumeNotFoundException + with self.assertRaises(VolumeNotFoundException): + self.openstack_connector.get_volume_snapshot(snapshot_id) + mock_logger_exception.assert_called_with(f"No volume Snapshot with id {snapshot_id}") + + @patch("openstack_connector.openstack_connector.logger.info") + @patch("openstack_connector.openstack_connector.logger.exception") + def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): + # Prepare test data + snapshot_id = "test_snapshot_id" + + # Mock the delete_volume_snapshot method to avoid actual deletion in the test + self.mock_openstack_connection.delete_volume_snapshot.side_effect = [ + None, # No exception case + ResourceNotFound(message="Snapshot not found"), # SnapshotNotFoundException case + ConflictException(message="Delete snapshot failed"), # OpenStackCloudException case + OpenStackCloudException(message="Some other exception") # DefaultException case + ] + + # Call the delete_volume_snapshot 
method for different scenarios + # 1. No exception + self.openstack_connector.delete_volume_snapshot(snapshot_id) + mock_logger_info.assert_called_once_with(f"Delete volume Snapshot {snapshot_id}") + + # 2. ResourceNotFound, expect SnapshotNotFoundException + with self.assertRaises(SnapshotNotFoundException): # Replace Exception with the actual exception type + self.openstack_connector.delete_volume_snapshot(snapshot_id) + mock_logger_exception.assert_called_with(f"Snapshot not found: {snapshot_id}") + + # 3. ConflictException, expect OpenStackCloudException + with self.assertRaises(OpenStackCloudException): # Replace Exception with the actual exception type + self.openstack_connector.delete_volume_snapshot(snapshot_id) + mock_logger_exception.assert_called_with(f"Delete volume snapshot: {snapshot_id}) failed!") + + # 4. OpenStackCloudException, expect DefaultException + with self.assertRaises(DefaultException): # Replace Exception with the actual exception type + self.openstack_connector.delete_volume_snapshot(snapshot_id) + + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_servers(self, mock_logger_info): + # Prepare test data + expected_servers = fakes.generate_fake_resources(server.Server, count=3) + + # Mock the list_servers method to simulate fetching servers + self.mock_openstack_connection.list_servers.return_value = expected_servers + + # Call the get_servers method + result_servers = self.openstack_connector.get_servers() + + # Assertions + self.assertEqual(result_servers, expected_servers) + mock_logger_info.assert_called_once_with("Get servers") + @patch("openstack_connector.openstack_connector.logger.error") + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_servers_by_ids(self, mock_logger_info, mock_logger_exception, mock_logger_error): + # Prepare test data + server_ids = ["id1", "id2", "id3", "id4"] + expected_servers = 
[Server(id="id1"), Server(id="id2")] + + # Mock the get_server_by_id method to simulate fetching servers + self.mock_openstack_connection.get_server_by_id.side_effect = [ + expected_servers[0], # Server found + expected_servers[1], # Server found + None, # Server not found + Exception, + ] + + # Call the get_servers_by_ids method + result_servers = self.openstack_connector.get_servers_by_ids(server_ids) + + # Assertions + self.assertEqual(result_servers, expected_servers) # Exclude the None case + #mock_logger_info.assert_any_call(f"Get Servers by IDS : {server_ids}") + mock_logger_info.assert_any_call("Get server id1") + mock_logger_info.assert_any_call("Get server id2") + mock_logger_info.assert_any_call("Get server id3") + mock_logger_info.assert_any_call("Get server id4") + mock_logger_error.assert_called_once_with("Requested VM id3 not found!") + mock_logger_exception.assert_called_once_with("Requested VM id4 not found!\n ") if __name__ == "__main__": unittest.main() From 11cfbf579ae03f90ce765088b8919d9d4a0c6ae8 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 15:52:23 +0100 Subject: [PATCH 04/39] more tests --- .../openstack_connector.py | 22 +- simple_vm_client/scripts/__init__.py,cover | 0 .../scripts/bash/add_keys_to_authorized.sh | 2 +- simple_vm_client/test_openstack_connector.py | 422 ++++++++++++++++-- 4 files changed, 398 insertions(+), 48 deletions(-) create mode 100644 simple_vm_client/scripts/__init__.py,cover diff --git a/simple_vm_client/openstack_connector/openstack_connector.py b/simple_vm_client/openstack_connector/openstack_connector.py index cff9d49..ea28207 100644 --- a/simple_vm_client/openstack_connector/openstack_connector.py +++ b/simple_vm_client/openstack_connector/openstack_connector.py @@ -209,17 +209,18 @@ def get_volume(self, name_or_id: str) -> Volume: logger.info(f"Get Volume {name_or_id}") volume: Volume = self.openstack_connection.get_volume(name_or_id=name_or_id) if volume is None: - logger.exception(f"No 
Volume with id {name_or_id} ") + logger.exception(f"No Volume with id {name_or_id}") raise VolumeNotFoundException( - message=f"No Volume with id {name_or_id} ", name_or_id=name_or_id + message=f"No Volume with id {name_or_id}", name_or_id=name_or_id ) return volume def delete_volume(self, volume_id: str) -> None: try: - logger.info(f"Delete Volume {volume_id} ") + logger.info(f"Delete Volume {volume_id}") self.openstack_connection.delete_volume(name_or_id=volume_id) except ResourceNotFound as e: + logger.exception(f"No Volume with id {volume_id}") raise VolumeNotFoundException(message=e.message, name_or_id=volume_id) except ConflictException as e: @@ -238,6 +239,7 @@ def create_volume_snapshot( ) return volume_snapshot["id"] except ResourceNotFound as e: + logger.error(f"No Volume with id {volume_id}") raise VolumeNotFoundException(message=e.message, name_or_id=volume_id) except OpenStackCloudException as e: raise DefaultException(message=e.message) @@ -248,18 +250,20 @@ def get_volume_snapshot(self, name_or_id: str) -> Snapshot: name_or_id=name_or_id ) if snapshot is None: - logger.exception(f"No volume Snapshot with id {name_or_id} ") + logger.exception(f"No volume Snapshot with id {name_or_id}") raise VolumeNotFoundException( - message=f"No volume Snapshot with id {name_or_id} ", + message=f"No volume Snapshot with id {name_or_id}", name_or_id=name_or_id, ) return snapshot def delete_volume_snapshot(self, snapshot_id: str) -> None: try: - logger.info(f"Delete volume Snapshot {snapshot_id} ") + logger.info(f"Delete volume Snapshot {snapshot_id}") self.openstack_connection.delete_volume_snapshot(name_or_id=snapshot_id) except ResourceNotFound as e: + logger.exception(f"Snapshot not found: {snapshot_id}") + raise SnapshotNotFoundException(message=e.message, name_or_id=snapshot_id) except ConflictException as e: @@ -312,7 +316,11 @@ def get_servers_by_ids(self, ids: list[str]) -> list[Server]: logger.info(f"Get server {server_id}") try: server = 
self.openstack_connection.get_server_by_id(server_id) - servers.append(server) + if server: + servers.append(server) + else: + logger.error(f"Requested VM {server_id} not found!") + except Exception as e: logger.exception(f"Requested VM {server_id} not found!\n {e}") diff --git a/simple_vm_client/scripts/__init__.py,cover b/simple_vm_client/scripts/__init__.py,cover new file mode 100644 index 0000000..e69de29 diff --git a/simple_vm_client/scripts/bash/add_keys_to_authorized.sh b/simple_vm_client/scripts/bash/add_keys_to_authorized.sh index df6d994..254b0f6 100644 --- a/simple_vm_client/scripts/bash/add_keys_to_authorized.sh +++ b/simple_vm_client/scripts/bash/add_keys_to_authorized.sh @@ -5,4 +5,4 @@ for ix in ${!keys_to_add[*]} do printf "\n%s" "${keys_to_add[$ix]}" >> /home/ubuntu/.ssh/authorized_keys -done +done \ No newline at end of file diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index c088c93..355e163 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -1,24 +1,29 @@ import inspect import os +import socket import tempfile import unittest from unittest import mock from unittest.mock import MagicMock, patch, call +from openstack.block_storage.v3 import volume from openstack.block_storage.v3.volume import Volume from openstack.cloud import OpenStackCloudException from openstack.exceptions import ResourceNotFound, ConflictException, ResourceFailure +from openstack.network.v2.network import Network +from oslo_utils import encodeutils from openstack_connector.openstack_connector import OpenStackConnector from openstack.test import fakes -from openstack.compute.v2 import limits, server +from openstack.compute.v2 import limits, server, keypair, flavor from openstack.compute.v2.image import Image from openstack.block_storage.v3.limits import Limit from openstack.image.v2 import image as image_module from ttypes import ImageNotFoundException, 
VolumeNotFoundException, DefaultException, SnapshotNotFoundException, \ - ResourceNotAvailableException + ResourceNotAvailableException, OpenStackConflictException from openstack.compute import compute_service from openstack.compute.v2.server import Server + EXPECTED_IMAGE = image_module.Image(id='image_id_2', status='active', name="image_2", metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, tags=["portalclient"]) INACTIVE_IMAGE = image_module.Image(id='image_inactive', status='building', name="image_inactive", @@ -32,7 +37,20 @@ tags=["portalclient"]), INACTIVE_IMAGE ] -DEFAULT_SECURITY_GROUPS =["defaultSimpleVM"] +DEFAULT_SECURITY_GROUPS = ["defaultSimpleVM"] +CONFIG_DATA = """ + openstack: + gateway_ip: "192.168.1.1" + network: "my_network" + sub_network: "my_sub_network" + cloud_site: "my_cloud_site" + ssh_port_calculation: 22 + udp_port_calculation: 12345 + gateway_security_group_id: "security_group_id" + production: true + forc: + forc_security_group_id: "forc_security_group_id" + """ class TestOpenStackConnector(unittest.TestCase): @@ -40,15 +58,15 @@ class TestOpenStackConnector(unittest.TestCase): @classmethod def setUpClass(cls): cls.env_patcher = mock.patch.dict(os.environ, { - "OS_AUTH_URL": "https://example.com", - "OS_USERNAME": "username", - "OS_PASSWORD": "password", - "OS_PROJECT_NAME": "project_name", - "OS_PROJECT_ID": "project_id", - "OS_USER_DOMAIN_NAME": "user_domain", - "OS_PROJECT_DOMAIN_ID": "project_domain_id", - "USE_APPLICATION_CREDENTIALS": "False"} - ) + "OS_AUTH_URL": "https://example.com", + "OS_USERNAME": "username", + "OS_PASSWORD": "password", + "OS_PROJECT_NAME": "project_name", + "OS_PROJECT_ID": "project_id", + "OS_USER_DOMAIN_NAME": "user_domain", + "OS_PROJECT_DOMAIN_ID": "project_domain_id", + "USE_APPLICATION_CREDENTIALS": "False"} + ) cls.env_patcher.start() super().setUpClass() @@ -58,13 +76,14 @@ def tearDownClass(cls): super().tearDownClass() cls.env_patcher.stop() + def setUp(self): # Create an instance of 
YourClass with a mocked openstack_connection self.mock_openstack_connection = MagicMock() with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): self.openstack_connector = OpenStackConnector(None, None) self.openstack_connector.openstack_connection = self.mock_openstack_connection - self.openstack_connector.DEFAULT_SECURITY_GROUPS=DEFAULT_SECURITY_GROUPS + self.openstack_connector.DEFAULT_SECURITY_GROUPS = DEFAULT_SECURITY_GROUPS def init_openstack_connector(self): with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): @@ -72,22 +91,11 @@ def init_openstack_connector(self): openstack_connector.openstack_connection = self.mock_openstack_connection openstack_connector.DEFAULT_SECURITY_GROUPS = DEFAULT_SECURITY_GROUPS return openstack_connector + def test_load_config_yml(self): # Create a temporary YAML file with sample configuration with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: - temp_file.write(""" - openstack: - gateway_ip: "192.168.1.1" - network: "my_network" - sub_network: "my_sub_network" - cloud_site: "my_cloud_site" - ssh_port_calculation: 22 - udp_port_calculation: 12345 - gateway_security_group_id: "security_group_id" - production: true - forc: - forc_security_group_id: "forc_security_group_id" - """) + temp_file.write(CONFIG_DATA) # Call the load_config_yml method with the temporary file path self.openstack_connector.load_config_yml(temp_file.name) @@ -148,6 +156,7 @@ def test_load_env_config_application_credentials(self, mock_logger): call("APPLICATION CREDENTIALS will be used!") ] mock_logger.info.assert_has_calls(expected_calls, any_order=False) + @patch('openstack_connector.openstack_connector.logger') @patch.dict(os.environ, { "OS_AUTH_URL": ""}) @@ -163,6 +172,7 @@ def test_load_env_config_missing_os_auth_url(self, mock_logger): # Assert that sys.exit was called with status code 1 mock_exit.assert_called_once_with(1) + @patch('openstack_connector.openstack_connector.logger') 
@patch.dict(os.environ, {"USE_APPLICATION_CREDENTIALS": "True"}) def test_load_env_config_missing_app_cred_vars(self, mock_logger): @@ -210,6 +220,7 @@ def test_load_env_config_missing_username_password_vars(self, mock_logger): # Assert that sys.exit was called with status code 1 mock_exit.assert_called_once_with(1) + def test_get_default_security_groups(self): # Call the _get_default_security_groups method default_security_groups = self.openstack_connector._get_default_security_groups() @@ -221,13 +232,15 @@ def test_get_default_security_groups(self): default_security_groups.append("new_security_group") self.assertNotEqual(default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS) - def test_get_image(self): + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_image(self,mock_logger_info): self.mock_openstack_connection.get_image.return_value = EXPECTED_IMAGE result = self.openstack_connector.get_image(EXPECTED_IMAGE.id) + mock_logger_info.assert_called_once_with(f"Get Image {EXPECTED_IMAGE.id}") self.assertEqual(result, EXPECTED_IMAGE) + @patch("openstack_connector.openstack_connector.logger.info") - - def test_get_image_not_found_exception(self): + def test_get_image_not_found_exception(self,mock_logger_info): # Configure the mock_openstack_connection.get_image to return None self.mock_openstack_connection.get_image.return_value = None @@ -235,58 +248,72 @@ def test_get_image_not_found_exception(self): with self.assertRaises(ImageNotFoundException) as context: # Call the method with an image ID that will not be found self.openstack_connector.get_image('nonexistent_id', ignore_not_found=False) + mock_logger_info.assert_called_once_with(f"Get Image nonexistent_id") # Assert that the exception contains the expected message and image ID self.assertEqual( context.exception.message, "Image nonexistent_id not found!" 
) - - def test_get_image_not_active_exception(self): + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_image_not_active_exception(self,mock_logger_info): # Configure the mock_openstack_connection.get_image to return the not active image self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE - print(f"Name: {INACTIVE_IMAGE.name}") # Configure the ImageNotFoundException to be raised with self.assertRaises(ImageNotFoundException) as context: # Call the method with the not active image ID and set ignore_not_active to False self.openstack_connector.get_image(name_or_id=INACTIVE_IMAGE.name, ignore_not_active=False) + mock_logger_info.assert_called_once_with(f"Get Image {INACTIVE_IMAGE.name}") # Assert that the exception contains the expected message and image ID self.assertEqual( context.exception.message, f"Image {INACTIVE_IMAGE.name} found but not active!" ) + @patch("openstack_connector.openstack_connector.logger.info") - def test_get_images(self): + def test_get_images(self,mock_logger_info): # Configure the mock_openstack_connection.image.images to return the fake images self.mock_openstack_connection.image.images.return_value = IMAGES # Call the method result = self.openstack_connector.get_images() + mock_logger_info.assert_any_call("Get Images") + image_names = [image.name for image in IMAGES] + + mock_logger_info.assert_any_call(f"Found images - {image_names}") + # Assert that the method returns the expected result self.assertEqual(result, IMAGES) - - def test_get_active_image_by_os_version(self): + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_active_image_by_os_version(self,mock_logger_info): # Generate a set of fake images with different properties + os_version='22.04' # Configure the mock_openstack_connection.list_images to return the fake images self.mock_openstack_connection.list_images.return_value = IMAGES # Call the method with specific os_version and os_distro - result = 
self.openstack_connector.get_active_image_by_os_version(os_version='22.04', os_distro='ubuntu') + result = self.openstack_connector.get_active_image_by_os_version(os_version=os_version, os_distro='ubuntu') + mock_logger_info.assert_called_with(f"Get active Image by os-version: {os_version}") + # Assert that the method returns the expected image self.assertEqual(result, EXPECTED_IMAGE) - def test_get_active_image_by_os_version_not_found_exception(self): + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_active_image_by_os_version_not_found_exception(self,mock_logger_info): # Configure the mock_openstack_connection.list_images to return an empty list self.mock_openstack_connection.list_images.return_value = [] + os_version='nonexistent_version' # Configure the ImageNotFoundException to be raised with self.assertRaises(ImageNotFoundException) as context: # Call the method with an os_version and os_distro that won't find a matching image - self.openstack_connector.get_active_image_by_os_version('nonexistent_version', 'nonexistent_distro') + self.openstack_connector.get_active_image_by_os_version(os_version ,'nonexistent_distro') + mock_logger_info.assert_called_with(f"Get active Image by os-version: {os_version}") + # Assert that the exception contains the expected message self.assertEqual( @@ -467,7 +494,6 @@ def test_create_volume_snapshot(self, mock_logger_error, mock_logger_info): volume_id, snapshot_name, snapshot_description ) - @patch("openstack_connector.openstack_connector.logger.info") @patch("openstack_connector.openstack_connector.logger.exception") def test_get_volume_snapshot(self, mock_logger_exception, mock_logger_info): @@ -538,6 +564,7 @@ def test_get_servers(self, mock_logger_info): # Assertions self.assertEqual(result_servers, expected_servers) mock_logger_info.assert_called_once_with("Get servers") + @patch("openstack_connector.openstack_connector.logger.error") 
@patch("openstack_connector.openstack_connector.logger.exception") @patch("openstack_connector.openstack_connector.logger.info") @@ -559,7 +586,7 @@ def test_get_servers_by_ids(self, mock_logger_info, mock_logger_exception, mock_ # Assertions self.assertEqual(result_servers, expected_servers) # Exclude the None case - #mock_logger_info.assert_any_call(f"Get Servers by IDS : {server_ids}") + mock_logger_info.assert_any_call(f"Get Servers by IDS : {server_ids}") mock_logger_info.assert_any_call("Get server id1") mock_logger_info.assert_any_call("Get server id2") mock_logger_info.assert_any_call("Get server id3") @@ -567,5 +594,320 @@ def test_get_servers_by_ids(self, mock_logger_info, mock_logger_exception, mock_ mock_logger_error.assert_called_once_with("Requested VM id3 not found!") mock_logger_exception.assert_called_once_with("Requested VM id4 not found!\n ") + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_attach_volume_to_server(self, mock_logger_info, mock_logger_exception): + # Prepare test data + expected_attachment = {"device": "/dev/vdb"} # Replace with actual attachment details + expected_server = fakes.generate_fake_resource(server.Server) + expected_volume = fakes.generate_fake_resource(volume.Volume) + openstack_id = expected_server.id + volume_id = expected_volume.id + + # Mock the get_server and get_volume methods + self.mock_openstack_connection.attach_volume.return_value = expected_attachment + self.mock_openstack_connection.get_server_by_id.return_value = expected_server # Replace with actual Server instance + self.mock_openstack_connection.get_volume.return_value = expected_volume # Replace with actual Volume instance + + # Call the attach_volume_to_server method + result_attachment = self.openstack_connector.attach_volume_to_server(openstack_id, volume_id) + + # Assertions + self.assertEqual(result_attachment, expected_attachment) + 
mock_logger_info.assert_called_with(f"Attaching volume {volume_id} to virtualmachine {openstack_id}") + self.mock_openstack_connection.get_server_by_id.assert_called_once_with(id=openstack_id) + self.mock_openstack_connection.get_volume.assert_called_once_with(name_or_id=volume_id) + self.mock_openstack_connection.attach_volume.assert_called_once_with(server=expected_server, volume=expected_volume) + + # Test exception case + self.mock_openstack_connection.attach_volume.side_effect = ConflictException(message="Conflict error") + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.attach_volume_to_server(openstack_id, volume_id) + mock_logger_exception.assert_called_once_with( + f"Trying to attach volume {volume_id} to vm {openstack_id} error failed!", + exc_info=True, + ) + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_detach_volume(self, mock_logger_info, mock_logger_exception): + # Prepare test data + expected_server = fakes.generate_fake_resource(server.Server) + expected_volume = fakes.generate_fake_resource(volume.Volume) + server_id = expected_server.id + volume_id = expected_volume.id + + # Mock the get_volume, get_server, and detach_volume methods + self.mock_openstack_connection.get_volume.return_value = expected_volume # Replace with actual Volume instance + self.mock_openstack_connection.get_server_by_id.return_value = expected_server # Replace with actual Server instance + # Call the detach_volume method + self.openstack_connector.detach_volume(volume_id, server_id) + + # Assertions + mock_logger_info.assert_any_call(f"Delete Volume Attachment {volume_id} - {server_id}") + self.mock_openstack_connection.get_volume.assert_called_once_with(name_or_id=volume_id) + self.mock_openstack_connection.get_server_by_id.assert_called_once_with(id=server_id) + 
self.mock_openstack_connection.detach_volume.assert_called_once_with(volume=expected_volume, server=expected_server) + + # Test exception case + self.mock_openstack_connection.detach_volume.side_effect = ConflictException(message="Conflict error") + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.detach_volume(volume_id, server_id) + mock_logger_exception.assert_called_once_with( + f"Delete volume attachment (server: {server_id} volume: {volume_id}) failed!" + ) + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_resize_volume(self, mock_logger_info, mock_logger_exception): + # Prepare test data + expected_volume = fakes.generate_fake_resource(volume.Volume) + volume_id = expected_volume.id + size = 100 + + # Mock the extend_volume method + self.mock_openstack_connection.block_storage.extend_volume.side_effect = [None, # No exception case + ResourceNotFound(message="Volume not found"), + # VolumeNotFoundException case + OpenStackCloudException( + message="Resize error")] # DefaultException case + + # Call the resize_volume method for different scenarios + # 1. No exception + self.openstack_connector.resize_volume(volume_id, size) + mock_logger_info.assert_called_once_with(f"Extend volume {volume_id} to size {size}") + + # 2. ResourceNotFound, expect VolumeNotFoundException + with self.assertRaises(VolumeNotFoundException): + self.openstack_connector.resize_volume(volume_id, size) + + # 3. 
OpenStackCloudException, expect DefaultException + with self.assertRaises(DefaultException): + self.openstack_connector.resize_volume(volume_id, size) + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_create_volume(self, mock_logger_info, mock_logger_exception): + # Prepare test data + volume_name = "test_volume" + volume_storage = 100 + metadata = {"key": "value"} + + # Mock the create_volume method + self.mock_openstack_connection.block_storage.create_volume.side_effect = [ + Volume(id="volume_id"), # Successful case + ResourceFailure(message="Volume creation failed"), # ResourceNotAvailableException case + ] + + # Call the create_volume method for different scenarios + # 1. Successful case + result_volume = self.openstack_connector.create_volume(volume_name, volume_storage, metadata) + mock_logger_info.assert_called_once_with(f"Creating volume with {volume_storage} GB storage") + self.assertIsInstance(result_volume, Volume) + + # 2. 
ResourceFailure, expect ResourceNotAvailableException + with self.assertRaises(ResourceNotAvailableException): + self.openstack_connector.create_volume(volume_name, volume_storage, metadata) + mock_logger_exception.assert_called_once_with(f"Trying to create volume with {volume_storage} GB failed", exc_info=True) + + @patch("openstack_connector.openstack_connector.logger.exception") + def test_get_network(self, mock_logger_exception): + with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + + # Call the load_config_yml method with the temporary file path + self.openstack_connector.load_config_yml(temp_file.name) + + # Mock the find_network method + self.mock_openstack_connection.network.find_network.return_value = Network(id="my_network") + + # Call the get_network method + result_network = self.openstack_connector.get_network() + + # Assertions + self.assertIsInstance(result_network, Network) + self.assertEqual(result_network.id, "my_network") + self.mock_openstack_connection.network.find_network.assert_called_once_with(self.openstack_connector.NETWORK) + mock_logger_exception.assert_not_called() # Ensure no exception is logged + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_import_existing_keypair(self, mock_logger_info, mock_logger_exception): + # Mock the get_keypair method for existing keypair + existing_keypair = fakes.generate_fake_resource(keypair.Keypair) + + self.mock_openstack_connection.get_keypair.return_value = existing_keypair + + # Call the import_keypair method for an existing keypair + result_keypair = self.openstack_connector.import_keypair(keyname=existing_keypair.name, public_key=existing_keypair.public_key) + + # Assertions for existing keypair + self.assertEqual(result_keypair, existing_keypair) + self.mock_openstack_connection.get_keypair.assert_called_once_with(name_or_id=existing_keypair.name) + 
mock_logger_info.assert_called_once_with(f"Get Keypair {existing_keypair.name}") + self.mock_openstack_connection.create_keypair.assert_not_called() + self.mock_openstack_connection.delete_keypair.assert_not_called() + mock_logger_exception.assert_not_called() + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_import_non_existing_keypair(self, mock_logger_info, mock_logger_exception): + # Mock the get_keypair method for non-existing keypair + new_keypair = fakes.generate_fake_resource(keypair.Keypair) + + self.mock_openstack_connection.get_keypair.return_value = None + self.mock_openstack_connection.create_keypair.return_value = new_keypair + + # Call the import_keypair method for a new keypair + result_keypair = self.openstack_connector.import_keypair(keyname=new_keypair.name, public_key=new_keypair.public_key) + + # Assertions for new keypair + self.assertEqual(result_keypair, new_keypair) + + self.mock_openstack_connection.get_keypair.assert_called_with(name_or_id=new_keypair.name) + self.mock_openstack_connection.create_keypair.assert_called_once_with(name=new_keypair.name, public_key=new_keypair.public_key) + mock_logger_info.assert_called_with(f"Create Keypair {new_keypair.name}") + self.mock_openstack_connection.delete_keypair.assert_not_called() + mock_logger_exception.assert_not_called() + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_import_changed_keypair(self, mock_logger_info, mock_logger_exception): + # Mock the get_keypair method for keypair with changed public_key + changed_keypair = fakes.generate_fake_resource(keypair.Keypair) + old_keypair = fakes.generate_fake_resource(keypair.Keypair) + changed_keypair.name = old_keypair.name + + self.mock_openstack_connection.get_keypair.return_value = old_keypair + 
self.mock_openstack_connection.create_keypair.return_value = changed_keypair + + # Call the import_keypair method for a keypair with changed public_key + result_keypair = self.openstack_connector.import_keypair(keyname=changed_keypair.name, public_key=changed_keypair.public_key) + + # Assertions for keypair with changed public_key + self.assertEqual(result_keypair, changed_keypair) + self.mock_openstack_connection.get_keypair.assert_called_with(name_or_id=changed_keypair.name) + self.mock_openstack_connection.create_keypair.assert_called_once_with(name=changed_keypair.name, + public_key=changed_keypair.public_key) + self.mock_openstack_connection.delete_keypair.assert_called_once_with(name=changed_keypair.name) + mock_logger_info.assert_any_call(f"Delete keypair: {changed_keypair.name}") + mock_logger_info.assert_any_call(f"Key {changed_keypair.name} has changed. Replace old Key") + mock_logger_exception.assert_not_called() + + @patch("openstack_connector.openstack_connector.logger.exception") + @patch("openstack_connector.openstack_connector.logger.info") + def test_import_same_keypair(self, mock_logger_info, mock_logger_exception): + # Mock the get_keypair method for keypair with same public_key + same_keypair = fakes.generate_fake_resource(keypair.Keypair) + + self.mock_openstack_connection.get_keypair.return_value = same_keypair + + # Call the import_keypair method for a keypair with same public_key + result_keypair = self.openstack_connector.import_keypair(keyname=same_keypair.name, public_key=same_keypair.public_key) + + # Assertions for keypair with same public_key + self.assertEqual(result_keypair, same_keypair) + self.mock_openstack_connection.get_keypair.assert_called_with(name_or_id=same_keypair.name) + self.mock_openstack_connection.create_keypair.assert_not_called() + self.mock_openstack_connection.delete_keypair.assert_not_called() + mock_logger_info.assert_called_with(f"Get Keypair {same_keypair.name}") + mock_logger_exception.assert_not_called() + + 
@patch("openstack_connector.openstack_connector.logger.info") + def test_create_add_keys_script(self, mock_logger_info): + # Prepare test data + keys = ["key1", "key2", "key3"] + + # Call the create_add_keys_script method + result_script = self.openstack_connector.create_add_keys_script(keys) + print(result_script) + + # Assertions + expected_script_content = '#!/bin/bash\ndeclare -a keys_to_add=("key1" "key2" "key3" )\necho "Found keys: ${#keys_to_add[*]}"\nfor ix in ${!keys_to_add[*]}\ndo\n printf "\\n%s" "${keys_to_add[$ix]}" >> /home/ubuntu/.ssh/authorized_keys\n\ndone\n' + expected_script_content=encodeutils.safe_encode(expected_script_content.encode("utf-8")) + + # Additional assertions + mock_logger_info.assert_called_once_with("create add key script") + + # Check that the real script content matches the expected content + self.assertEqual(result_script, expected_script_content) + + @patch("openstack_connector.openstack_connector.socket.socket") + @patch("openstack_connector.openstack_connector.logger.info") + def test_netcat(self, mock_logger_info, mock_socket): + # Replace with the actual host and port + host = "example.com" + port = 22 + + # Mock the connect_ex method to simulate the connection result + mock_socket.return_value.connect_ex.return_value = 0 + + # Call the netcat method + result = self.openstack_connector.netcat(host, port) + + # Assertions + self.assertTrue(result) # Adjust based on your logic + mock_logger_info.assert_any_call(f"Checking SSH Connection {host}:{port}") + mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM) + mock_socket.return_value.settimeout.assert_called_once_with(5) + mock_socket.return_value.connect_ex.assert_called_once_with((host, port)) + mock_logger_info.assert_any_call(f"Checking SSH Connection {host}:{port} Result = 0") + + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_flavor(self, mock_logger_info): + # Replace with the actual flavor name or ID + expected_flavor= 
fakes.generate_fake_resource(flavor.Flavor) + + + # Mock the get_flavor method to simulate fetching a flavor + self.mock_openstack_connection.get_flavor.return_value = expected_flavor + + + # Call the get_flavor method + result_flavor = self.openstack_connector.get_flavor(expected_flavor.name) + + # Assertions + self.assertEqual(result_flavor, expected_flavor) + mock_logger_info.assert_called_with(f"Get flavor {expected_flavor.name}") + self.mock_openstack_connection.get_flavor.assert_called_once_with(name_or_id=expected_flavor.name, get_extra=True) + + @mock.patch("openstack_connector.openstack_connector.logger.info") + def test_get_flavors(self, mock_logger_info): + # Replace with the actual flavors you want to simulate + expected_flavors = list(fakes.generate_fake_resources(flavor.Flavor, count=3)) + + # Mock the list_flavors method to simulate fetching flavors + self.mock_openstack_connection.list_flavors.return_value = expected_flavors + + # Call the get_flavors method + result_flavors = self.openstack_connector.get_flavors() + + # Assertions + self.assertEqual(result_flavors, expected_flavors) + mock_logger_info.assert_any_call("Get Flavors") + mock_logger_info.assert_any_call([flav["name"] for flav in expected_flavors]) + + self.mock_openstack_connection.list_flavors.assert_called_once_with(get_extra=True) + + @mock.patch("openstack_connector.openstack_connector.logger.info") + def test_get_servers_by_bibigrid_id(self, mock_logger_info): + # Replace with the actual Bibigrid ID you want to test + bibigrid_id = "your_bibigrid_id" + + # Replace with the actual servers you want to simulate + expected_servers = list(fakes.generate_fake_resources(flavor.Flavor, count=3)) + + # Mock the list_servers method to simulate fetching servers + self.mock_openstack_connection.list_servers.return_value = expected_servers + + # Call the get_servers_by_bibigrid_id method + result_servers = self.openstack_connector.get_servers_by_bibigrid_id(bibigrid_id) + + # Assertions + 
self.assertEqual(result_servers, expected_servers) + mock_logger_info.assert_called_with(f"Get Servery by Bibigrid id: {bibigrid_id}") + self.mock_openstack_connection.list_servers.assert_called_once_with(filters={"bibigrid_id": bibigrid_id, "name": bibigrid_id}) + + if __name__ == "__main__": unittest.main() From 6374c61f87a1e7cd8d011add3f87a0f8a72d840a Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:11:09 +0100 Subject: [PATCH 05/39] more tests --- simple_vm_client/test_openstack_connector.py | 58 +++++++++++++++++++- 1 file changed, 57 insertions(+), 1 deletion(-) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 355e163..185d576 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -18,7 +18,7 @@ from openstack.compute.v2 import limits, server, keypair, flavor from openstack.compute.v2.image import Image from openstack.block_storage.v3.limits import Limit -from openstack.image.v2 import image as image_module +from openstack.image.v2 import image as image_module, image from ttypes import ImageNotFoundException, VolumeNotFoundException, DefaultException, SnapshotNotFoundException, \ ResourceNotAvailableException, OpenStackConflictException from openstack.compute import compute_service @@ -908,6 +908,62 @@ def test_get_servers_by_bibigrid_id(self, mock_logger_info): mock_logger_info.assert_called_with(f"Get Servery by Bibigrid id: {bibigrid_id}") self.mock_openstack_connection.list_servers.assert_called_once_with(filters={"bibigrid_id": bibigrid_id, "name": bibigrid_id}) + @mock.patch("openstack_connector.openstack_connector.logger.exception") + @mock.patch("openstack_connector.openstack_connector.logger.info") + def test_create_snapshot(self, mock_logger_info, mock_logger_exception): + # Replace with the actual parameters you want to test + openstack_id = "your_openstack_id" + name = "your_snapshot_name" + username = 
"your_username" + base_tags = ["tag1", "tag2"] + description = "your_description" + new_snapshot=fakes.generate_fake_resource(image.Image) + + # Mock the create_image_snapshot and image.add_tag methods + self.mock_openstack_connection.create_image_snapshot.return_value = new_snapshot + self.mock_openstack_connection.image.add_tag.return_value = None + + # Case 1: No exception + result_snapshot_id = self.openstack_connector.create_snapshot( + openstack_id, name, username, base_tags, description + ) + self.assertEqual(result_snapshot_id, new_snapshot.id) + + # Case 2: ConflictException + self.mock_openstack_connection.create_image_snapshot.side_effect = ConflictException(message="Conflict") + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.create_snapshot( + openstack_id, name, username, base_tags, description + ) + mock_logger_exception.assert_called_once_with("Create snapshot your_openstack_id failed!") + + # Case 3: OpenStackCloudException + self.mock_openstack_connection.create_image_snapshot.side_effect = OpenStackCloudException(message="Cloud Exception") + with self.assertRaises(DefaultException): + self.openstack_connector.create_snapshot( + openstack_id, name, username, base_tags, description + ) + + @mock.patch("openstack_connector.openstack_connector.logger.exception") + @mock.patch("openstack_connector.openstack_connector.logger.info") + def test_delete_image(self, mock_logger_info, mock_logger_exception): + # Replace with the actual image_id you want to test + fake_image=fakes.generate_fake_resource(image.Image) + + # Mock the get_image and compute.delete_image methods + self.mock_openstack_connection.get_image.return_value = fake_image + self.mock_openstack_connection.compute.delete_image.return_value = None + + # Case 1: No exception + self.openstack_connector.delete_image(fake_image.id) + mock_logger_info.assert_any_call(f"Delete Image {fake_image.id}") + 
self.mock_openstack_connection.compute.delete_image.assert_called_once_with(fake_image.id) + + # Case 2: Other exceptions + self.mock_openstack_connection.get_image.side_effect = Exception("Some error") + with self.assertRaises(DefaultException): + self.openstack_connector.delete_image(fake_image.id) + mock_logger_exception.assert_called_once_with(f"Delete Image {fake_image.id} failed!") if __name__ == "__main__": unittest.main() From 37fe96da80ca3bc6930216889bbba0015c6f1a4d Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:37:43 +0100 Subject: [PATCH 06/39] added coverage action --- .github/workflows/coverage.yml | 18 ++++++++++++++++++ 1 file changed, 18 insertions(+) create mode 100644 .github/workflows/coverage.yml diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml new file mode 100644 index 0000000..223a2a1 --- /dev/null +++ b/.github/workflows/coverage.yml @@ -0,0 +1,18 @@ +name: 'coverage' +on: + pull_request: + +jobs: + coverage: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v4.1.1 + + - name: Run Coverage + run: cd simplevm-client/simple_vm_client && coverage xml coverage.xml + + - name: Get Cover + uses: orgoro/coverage@v3.1 + with: + coverageFile: coverage.xml + token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file From 56aa90750b0130c7a8b3248aa62d90141e7b32be Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:40:20 +0100 Subject: [PATCH 07/39] Update coverage.yml --- .github/workflows/coverage.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 223a2a1..445dd26 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -1,6 +1,8 @@ name: 'coverage' on: pull_request: + workflow_dispatch: + jobs: coverage: @@ -15,4 +17,4 @@ jobs: uses: orgoro/coverage@v3.1 with: coverageFile: coverage.xml - token: ${{ secrets.GITHUB_TOKEN }} \ No newline at end of file + 
token: ${{ secrets.GITHUB_TOKEN }} From 7c95888adcb460893e96cc7188dd74e75487074e Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:41:31 +0100 Subject: [PATCH 08/39] updated action --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 445dd26..d08a82c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -11,7 +11,7 @@ jobs: - uses: actions/checkout@v4.1.1 - name: Run Coverage - run: cd simplevm-client/simple_vm_client && coverage xml coverage.xml + run: cd simple_vm_client && coverage xml coverage.xml - name: Get Cover uses: orgoro/coverage@v3.1 From 50938fc17c7aef03f9feab522b153d3ca24e92a6 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:43:03 +0100 Subject: [PATCH 09/39] install coverage --- .github/workflows/coverage.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index d08a82c..f257a8f 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -9,6 +9,8 @@ jobs: runs-on: ubuntu-latest steps: - uses: actions/checkout@v4.1.1 + - name: Install coverage + run: pip install coverage - name: Run Coverage run: cd simple_vm_client && coverage xml coverage.xml From 52af75c1def3799f963abcf415d6dfe81cc91435 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:45:20 +0100 Subject: [PATCH 10/39] updated coverage path --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index f257a8f..05e646d 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -18,5 +18,5 @@ jobs: - name: Get Cover uses: orgoro/coverage@v3.1 with: - coverageFile: coverage.xml + coverageFile: simple_vm_client/coverage.xml token: ${{ secrets.GITHUB_TOKEN }} From 
7b820c0f06638545b2e8cb99376cd4b731598b63 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 16:51:26 +0100 Subject: [PATCH 11/39] add .coveragerc --- .pre-commit-config.yaml | 5 +++-- simple_vm_client/.coveragerc | 6 ++++++ 2 files changed, 9 insertions(+), 2 deletions(-) create mode 100644 simple_vm_client/.coveragerc diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index de5821b..31c803d 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -9,10 +9,11 @@ repos: - id: end-of-file-fixer - id: check-yaml - - repo: https://github.com/psf/black - rev: 23.11.0 + - repo: https://github.com/psf/black-pre-commit-mirror + rev: 23.12.0 hooks: - id: black + language_version: python3.11 - repo: https://github.com/sondrelg/pep585-upgrade rev: 'v1.0.1' # Use the sha / tag you want to point at hooks: diff --git a/simple_vm_client/.coveragerc b/simple_vm_client/.coveragerc new file mode 100644 index 0000000..2bd8a6f --- /dev/null +++ b/simple_vm_client/.coveragerc @@ -0,0 +1,6 @@ +[run] +source = openstack_connector + +[report] +exclude_lines = + pragma: no cover From 2db13460c8af1e49902342c9576b92996361d525 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 17:00:23 +0100 Subject: [PATCH 12/39] testing coverage report --- simple_vm_client/test_openstack_connector.py | 568 +++++++++++++------ 1 file changed, 388 insertions(+), 180 deletions(-) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 185d576..c731cc4 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -1,41 +1,64 @@ -import inspect import os import socket import tempfile import unittest from unittest import mock -from unittest.mock import MagicMock, patch, call +from unittest.mock import MagicMock, call, patch from openstack.block_storage.v3 import volume +from openstack.block_storage.v3.limits import Limit from openstack.block_storage.v3.volume 
import Volume from openstack.cloud import OpenStackCloudException -from openstack.exceptions import ResourceNotFound, ConflictException, ResourceFailure +from openstack.compute.v2 import flavor, keypair, limits, server +from openstack.compute.v2.server import Server +from openstack.exceptions import ConflictException, ResourceFailure, ResourceNotFound +from openstack.image.v2 import image +from openstack.image.v2 import image as image_module from openstack.network.v2.network import Network -from oslo_utils import encodeutils - -from openstack_connector.openstack_connector import OpenStackConnector from openstack.test import fakes -from openstack.compute.v2 import limits, server, keypair, flavor -from openstack.compute.v2.image import Image -from openstack.block_storage.v3.limits import Limit -from openstack.image.v2 import image as image_module, image -from ttypes import ImageNotFoundException, VolumeNotFoundException, DefaultException, SnapshotNotFoundException, \ - ResourceNotAvailableException, OpenStackConflictException -from openstack.compute import compute_service -from openstack.compute.v2.server import Server - -EXPECTED_IMAGE = image_module.Image(id='image_id_2', status='active', name="image_2", - metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, tags=["portalclient"]) -INACTIVE_IMAGE = image_module.Image(id='image_inactive', status='building', name="image_inactive", - metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, tags=["portalclient"]) +from openstack_connector.openstack_connector import OpenStackConnector +from oslo_utils import encodeutils +from ttypes import ( + DefaultException, + ImageNotFoundException, + OpenStackConflictException, + ResourceNotAvailableException, + SnapshotNotFoundException, + VolumeNotFoundException, +) + +EXPECTED_IMAGE = image_module.Image( + id="image_id_2", + status="active", + name="image_2", + metadata={"os_version": "22.04", "os_distro": "ubuntu"}, + tags=["portalclient"], +) +INACTIVE_IMAGE = 
image_module.Image( + id="image_inactive", + status="building", + name="image_inactive", + metadata={"os_version": "22.04", "os_distro": "ubuntu"}, + tags=["portalclient"], +) IMAGES = [ - image_module.Image(id='image_id_1', status='inactive', name="image_1", metadata={'os_version': '22.04', 'os_distro': 'ubuntu'}, - tags=["portalclient"]), + image_module.Image( + id="image_id_1", + status="inactive", + name="image_1", + metadata={"os_version": "22.04", "os_distro": "ubuntu"}, + tags=["portalclient"], + ), EXPECTED_IMAGE, - image_module.Image(id='image_id_3', status='active', name="image_3", metadata={'os_version': '22.04', 'os_distro': 'centos'}, - tags=["portalclient"]), - INACTIVE_IMAGE + image_module.Image( + id="image_id_3", + status="active", + name="image_3", + metadata={"os_version": "22.04", "os_distro": "centos"}, + tags=["portalclient"], + ), + INACTIVE_IMAGE, ] DEFAULT_SECURITY_GROUPS = ["defaultSimpleVM"] CONFIG_DATA = """ @@ -54,19 +77,21 @@ class TestOpenStackConnector(unittest.TestCase): - @classmethod def setUpClass(cls): - cls.env_patcher = mock.patch.dict(os.environ, { - "OS_AUTH_URL": "https://example.com", - "OS_USERNAME": "username", - "OS_PASSWORD": "password", - "OS_PROJECT_NAME": "project_name", - "OS_PROJECT_ID": "project_id", - "OS_USER_DOMAIN_NAME": "user_domain", - "OS_PROJECT_DOMAIN_ID": "project_domain_id", - "USE_APPLICATION_CREDENTIALS": "False"} - ) + cls.env_patcher = mock.patch.dict( + os.environ, + { + "OS_AUTH_URL": "https://example.com", + "OS_USERNAME": "username", + "OS_PASSWORD": "password", + "OS_PROJECT_NAME": "project_name", + "OS_PROJECT_ID": "project_id", + "OS_USER_DOMAIN_NAME": "user_domain", + "OS_PROJECT_DOMAIN_ID": "project_domain_id", + "USE_APPLICATION_CREDENTIALS": "False", + }, + ) cls.env_patcher.start() super().setUpClass() @@ -82,7 +107,9 @@ def setUp(self): self.mock_openstack_connection = MagicMock() with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): self.openstack_connector = 
OpenStackConnector(None, None) - self.openstack_connector.openstack_connection = self.mock_openstack_connection + self.openstack_connector.openstack_connection = ( + self.mock_openstack_connection + ) self.openstack_connector.DEFAULT_SECURITY_GROUPS = DEFAULT_SECURITY_GROUPS def init_openstack_connector(self): @@ -94,7 +121,7 @@ def init_openstack_connector(self): def test_load_config_yml(self): # Create a temporary YAML file with sample configuration - with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: temp_file.write(CONFIG_DATA) # Call the load_config_yml method with the temporary file path @@ -108,9 +135,11 @@ def test_load_config_yml(self): self.assertEqual(self.openstack_connector.CLOUD_SITE, "my_cloud_site") self.assertEqual(self.openstack_connector.SSH_PORT_CALCULATION, 22) self.assertEqual(self.openstack_connector.UDP_PORT_CALCULATION, 12345) - self.assertEqual(self.openstack_connector.FORC_SECURITY_GROUP_ID, "forc_security_group_id") + self.assertEqual( + self.openstack_connector.FORC_SECURITY_GROUP_ID, "forc_security_group_id" + ) - @patch('openstack_connector.openstack_connector.logger') + @patch("openstack_connector.openstack_connector.logger") def test_load_env_config_username_password(self, mock_logger): openstack_connector = self.init_openstack_connector() @@ -130,13 +159,16 @@ def test_load_env_config_username_password(self, mock_logger): # Assert that logger.info was called with the expected message mock_logger.info.assert_called_once_with("Load environment config: OpenStack") - @patch('openstack_connector.openstack_connector.logger') - @patch.dict(os.environ, { - "OS_AUTH_URL": "https://example.com", - "USE_APPLICATION_CREDENTIALS": "True", - "OS_APPLICATION_CREDENTIAL_ID": "app_cred_id", - "OS_APPLICATION_CREDENTIAL_SECRET": "app_cred_secret" - }) + @patch("openstack_connector.openstack_connector.logger") + @patch.dict( + os.environ, + { + 
"OS_AUTH_URL": "https://example.com", + "USE_APPLICATION_CREDENTIALS": "True", + "OS_APPLICATION_CREDENTIAL_ID": "app_cred_id", + "OS_APPLICATION_CREDENTIAL_SECRET": "app_cred_secret", + }, + ) def test_load_env_config_application_credentials(self, mock_logger): # Create an instance of OpenStackConnector openstack_connector = self.init_openstack_connector() @@ -148,23 +180,24 @@ def test_load_env_config_application_credentials(self, mock_logger): self.assertEqual(openstack_connector.AUTH_URL, "https://example.com") self.assertTrue(openstack_connector.USE_APPLICATION_CREDENTIALS) self.assertEqual(openstack_connector.APPLICATION_CREDENTIAL_ID, "app_cred_id") - self.assertEqual(openstack_connector.APPLICATION_CREDENTIAL_SECRET, "app_cred_secret") + self.assertEqual( + openstack_connector.APPLICATION_CREDENTIAL_SECRET, "app_cred_secret" + ) # Assert that logger.info was called with the expected messages expected_calls = [ call("Load environment config: OpenStack"), - call("APPLICATION CREDENTIALS will be used!") + call("APPLICATION CREDENTIALS will be used!"), ] mock_logger.info.assert_has_calls(expected_calls, any_order=False) - @patch('openstack_connector.openstack_connector.logger') - @patch.dict(os.environ, { - "OS_AUTH_URL": ""}) + @patch("openstack_connector.openstack_connector.logger") + @patch.dict(os.environ, {"OS_AUTH_URL": ""}) def test_load_env_config_missing_os_auth_url(self, mock_logger): openstack_connector = self.init_openstack_connector() # Mock sys.exit to capture the exit status - with patch('sys.exit') as mock_exit: + with patch("sys.exit") as mock_exit: # Call the load_env_config method openstack_connector.load_env_config() # Assert that logger.error was called with the expected message @@ -173,14 +206,14 @@ def test_load_env_config_missing_os_auth_url(self, mock_logger): # Assert that sys.exit was called with status code 1 mock_exit.assert_called_once_with(1) - @patch('openstack_connector.openstack_connector.logger') + 
@patch("openstack_connector.openstack_connector.logger") @patch.dict(os.environ, {"USE_APPLICATION_CREDENTIALS": "True"}) def test_load_env_config_missing_app_cred_vars(self, mock_logger): # Create an instance of OpenStackConnector openstack_connector = self.init_openstack_connector() # Mock sys.exit to capture the exit status - with patch('sys.exit') as mock_exit: + with patch("sys.exit") as mock_exit: # Call the load_env_config method openstack_connector.load_env_config() @@ -191,16 +224,19 @@ def test_load_env_config_missing_app_cred_vars(self, mock_logger): # Assert that sys.exit was called with status code 1 mock_exit.assert_called_once_with(1) - @patch('openstack_connector.openstack_connector.logger') - @patch.dict(os.environ, { - "USE_APPLICATION_CREDENTIALS": "False", - "OS_USERNAME": "test_username", - "OS_PASSWORD": "test_password", - "OS_PROJECT_NAME": "test_project_name", - "OS_PROJECT_ID": "test_project_id", - "OS_USER_DOMAIN_NAME": "test_user_domain", - "OS_PROJECT_DOMAIN_ID": "test_project_domain" - }) + @patch("openstack_connector.openstack_connector.logger") + @patch.dict( + os.environ, + { + "USE_APPLICATION_CREDENTIALS": "False", + "OS_USERNAME": "test_username", + "OS_PASSWORD": "test_password", + "OS_PROJECT_NAME": "test_project_name", + "OS_PROJECT_ID": "test_project_id", + "OS_USER_DOMAIN_NAME": "test_user_domain", + "OS_PROJECT_DOMAIN_ID": "test_project_domain", + }, + ) def test_load_env_config_missing_username_password_vars(self, mock_logger): # Create an instance of OpenStackConnector using the helper method openstack_connector = self.init_openstack_connector() @@ -210,7 +246,7 @@ def test_load_env_config_missing_username_password_vars(self, mock_logger): del os.environ["OS_PASSWORD"] # Mock sys.exit to capture the exit status - with patch('sys.exit') as mock_exit: + with patch("sys.exit") as mock_exit: # Call the load_env_config method openstack_connector.load_env_config() @@ -223,56 +259,62 @@ def 
test_load_env_config_missing_username_password_vars(self, mock_logger): def test_get_default_security_groups(self): # Call the _get_default_security_groups method - default_security_groups = self.openstack_connector._get_default_security_groups() + default_security_groups = ( + self.openstack_connector._get_default_security_groups() + ) # Assert that the returned list is a copy of the DEFAULT_SECURITY_GROUPS attribute - self.assertEqual(default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS) + self.assertEqual( + default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS + ) # Assert that modifying the returned list does not affect the DEFAULT_SECURITY_GROUPS attribute default_security_groups.append("new_security_group") - self.assertNotEqual(default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS) + self.assertNotEqual( + default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS + ) @patch("openstack_connector.openstack_connector.logger.info") - def test_get_image(self,mock_logger_info): + def test_get_image(self, mock_logger_info): self.mock_openstack_connection.get_image.return_value = EXPECTED_IMAGE result = self.openstack_connector.get_image(EXPECTED_IMAGE.id) mock_logger_info.assert_called_once_with(f"Get Image {EXPECTED_IMAGE.id}") self.assertEqual(result, EXPECTED_IMAGE) - @patch("openstack_connector.openstack_connector.logger.info") - def test_get_image_not_found_exception(self,mock_logger_info): + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_image_not_found_exception(self, mock_logger_info): # Configure the mock_openstack_connection.get_image to return None self.mock_openstack_connection.get_image.return_value = None # Configure the ImageNotFoundException to be raised with self.assertRaises(ImageNotFoundException) as context: # Call the method with an image ID that will not be found - self.openstack_connector.get_image('nonexistent_id', 
ignore_not_found=False) - mock_logger_info.assert_called_once_with(f"Get Image nonexistent_id") + self.openstack_connector.get_image("nonexistent_id", ignore_not_found=False) + mock_logger_info.assert_called_once_with("Get Image nonexistent_id") # Assert that the exception contains the expected message and image ID - self.assertEqual( - context.exception.message, - "Image nonexistent_id not found!" - ) + self.assertEqual(context.exception.message, "Image nonexistent_id not found!") + @patch("openstack_connector.openstack_connector.logger.info") - def test_get_image_not_active_exception(self,mock_logger_info): + def test_get_image_not_active_exception(self, mock_logger_info): # Configure the mock_openstack_connection.get_image to return the not active image self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE # Configure the ImageNotFoundException to be raised with self.assertRaises(ImageNotFoundException) as context: # Call the method with the not active image ID and set ignore_not_active to False - self.openstack_connector.get_image(name_or_id=INACTIVE_IMAGE.name, ignore_not_active=False) + self.openstack_connector.get_image( + name_or_id=INACTIVE_IMAGE.name, ignore_not_active=False + ) mock_logger_info.assert_called_once_with(f"Get Image {INACTIVE_IMAGE.name}") # Assert that the exception contains the expected message and image ID self.assertEqual( context.exception.message, - f"Image {INACTIVE_IMAGE.name} found but not active!" 
+ f"Image {INACTIVE_IMAGE.name} found but not active!", ) - @patch("openstack_connector.openstack_connector.logger.info") - def test_get_images(self,mock_logger_info): + @patch("openstack_connector.openstack_connector.logger.info") + def test_get_images(self, mock_logger_info): # Configure the mock_openstack_connection.image.images to return the fake images self.mock_openstack_connection.image.images.return_value = IMAGES @@ -283,42 +325,48 @@ def test_get_images(self,mock_logger_info): mock_logger_info.assert_any_call(f"Found images - {image_names}") - # Assert that the method returns the expected result self.assertEqual(result, IMAGES) + @patch("openstack_connector.openstack_connector.logger.info") - def test_get_active_image_by_os_version(self,mock_logger_info): + def test_get_active_image_by_os_version(self, mock_logger_info): # Generate a set of fake images with different properties - os_version='22.04' + os_version = "22.04" # Configure the mock_openstack_connection.list_images to return the fake images self.mock_openstack_connection.list_images.return_value = IMAGES # Call the method with specific os_version and os_distro - result = self.openstack_connector.get_active_image_by_os_version(os_version=os_version, os_distro='ubuntu') - mock_logger_info.assert_called_with(f"Get active Image by os-version: {os_version}") - + result = self.openstack_connector.get_active_image_by_os_version( + os_version=os_version, os_distro="ubuntu" + ) + mock_logger_info.assert_called_with( + f"Get active Image by os-version: {os_version}" + ) # Assert that the method returns the expected image self.assertEqual(result, EXPECTED_IMAGE) @patch("openstack_connector.openstack_connector.logger.info") - def test_get_active_image_by_os_version_not_found_exception(self,mock_logger_info): + def test_get_active_image_by_os_version_not_found_exception(self, mock_logger_info): # Configure the mock_openstack_connection.list_images to return an empty list 
self.mock_openstack_connection.list_images.return_value = [] - os_version='nonexistent_version' + os_version = "nonexistent_version" # Configure the ImageNotFoundException to be raised with self.assertRaises(ImageNotFoundException) as context: # Call the method with an os_version and os_distro that won't find a matching image - self.openstack_connector.get_active_image_by_os_version(os_version ,'nonexistent_distro') - mock_logger_info.assert_called_with(f"Get active Image by os-version: {os_version}") - + self.openstack_connector.get_active_image_by_os_version( + os_version, "nonexistent_distro" + ) + mock_logger_info.assert_called_with( + f"Get active Image by os-version: {os_version}" + ) # Assert that the exception contains the expected message self.assertEqual( context.exception.message, - "Old Image was deactivated! No image with os_version:nonexistent_version and os_distro:nonexistent_distro found!" + "Old Image was deactivated! No image with os_version:nonexistent_version and os_distro:nonexistent_distro found!", ) def test_replace_inactive_image(self): @@ -330,7 +378,9 @@ def test_replace_inactive_image(self): self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE # Call the method with the inactive image ID and set replace_inactive to True - result = self.openstack_connector.get_image('inactive_id', replace_inactive=True) + result = self.openstack_connector.get_image( + "inactive_id", replace_inactive=True + ) # Assert that the method returns the replacement image self.assertEqual(result, EXPECTED_IMAGE) @@ -341,7 +391,7 @@ def test_get_limits(self): volume_limits = fakes.generate_fake_resource(Limit) self.mock_openstack_connection.get_compute_limits.return_value = compute_limits self.mock_openstack_connection.get_volume_limits.return_value = volume_limits - result = self.openstack_connector.get_limits() + self.openstack_connector.get_limits() @patch("openstack_connector.openstack_connector.logger.info") 
@patch("openstack_connector.openstack_connector.logger.error") @@ -363,8 +413,14 @@ def test_create_server(self, mock_logger_error, mock_logger_info): # Call the create_server method result = self.openstack_connector.create_server( - name, image_id, flavor_id, network_id, userdata, key_name, - metadata, security_groups + name, + image_id, + flavor_id, + network_id, + userdata, + key_name, + metadata, + security_groups, ) # Check if the method logs the correct information @@ -404,7 +460,9 @@ def test_get_volume(self, mock_logger_exception, mock_logger_info): mock_logger_info.assert_called_once_with(f"Get Volume {name_or_id}") # Check if the get_volume method on openstack_connection was called with the expected parameters - self.mock_openstack_connection.get_volume.assert_called_once_with(name_or_id=name_or_id) + self.mock_openstack_connection.get_volume.assert_called_once_with( + name_or_id=name_or_id + ) # Check if the method returns the fake volume object self.assertEqual(result, fake_volume) @@ -418,7 +476,9 @@ def test_get_volume_exception(self, mock_logger_exception): self.mock_openstack_connection.get_volume.return_value = None # Call the get_volume method and expect a VolumeNotFoundException - with self.assertRaises(Exception): # Replace Exception with the actual exception type + with self.assertRaises( + Exception + ): # Replace Exception with the actual exception type self.openstack_connector.get_volume(name_or_id) # Check if the method logs the correct exception information @@ -433,9 +493,15 @@ def test_delete_volume(self, mock_logger_exception, mock_logger_info): # Mock the delete_volume method to avoid actual deletion in the test self.mock_openstack_connection.delete_volume.side_effect = [ None, # No exception case - ResourceNotFound(message="Volume not found"), # VolumeNotFoundException case - ConflictException(message="Delete volume failed"), # OpenStackCloudException case - OpenStackCloudException(message="Some other exception") # DefaultException 
case + ResourceNotFound( + message="Volume not found" + ), # VolumeNotFoundException case + ConflictException( + message="Delete volume failed" + ), # OpenStackCloudException case + OpenStackCloudException( + message="Some other exception" + ), # DefaultException case ] # Call the delete_volume method for different scenarios @@ -445,17 +511,23 @@ def test_delete_volume(self, mock_logger_exception, mock_logger_info): mock_logger_exception.assert_not_called() # 2. ResourceNotFound, expect VolumeNotFoundException - with self.assertRaises(VolumeNotFoundException): # Replace Exception with the actual exception type + with self.assertRaises( + VolumeNotFoundException + ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) mock_logger_exception.assert_called_with(f"No Volume with id {volume_id}") # 3. ConflictException, expect OpenStackCloudException - with self.assertRaises(OpenStackCloudException): # Replace Exception with the actual exception type + with self.assertRaises( + OpenStackCloudException + ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) mock_logger_exception.assert_called_with(f"Delete volume: {volume_id}) failed!") # 4. 
OpenStackCloudException, expect DefaultException - with self.assertRaises(DefaultException): # Replace Exception with the actual exception type + with self.assertRaises( + DefaultException + ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) @patch("openstack_connector.openstack_connector.logger.info") @@ -469,8 +541,12 @@ def test_create_volume_snapshot(self, mock_logger_error, mock_logger_info): # Mock the create_volume_snapshot method to avoid actual creation in the test self.mock_openstack_connection.create_volume_snapshot.side_effect = [ {"id": "snapshot_id"}, # No exception case - ResourceNotFound(message="Volume not found"), # VolumeNotFoundException case - OpenStackCloudException(message="Some other exception") # DefaultException case + ResourceNotFound( + message="Volume not found" + ), # VolumeNotFoundException case + OpenStackCloudException( + message="Some other exception" + ), # DefaultException case ] # Call the create_volume_snapshot method for different scenarios @@ -479,7 +555,9 @@ def test_create_volume_snapshot(self, mock_logger_error, mock_logger_info): volume_id, snapshot_name, snapshot_description ) self.assertEqual(snapshot_id, "snapshot_id") - mock_logger_info.assert_called_once_with(f"Create Snapshot for Volume {volume_id}") + mock_logger_info.assert_called_once_with( + f"Create Snapshot for Volume {volume_id}" + ) # 2. ResourceNotFound, expect VolumeNotFoundException with self.assertRaises(VolumeNotFoundException): @@ -515,7 +593,9 @@ def test_get_volume_snapshot(self, mock_logger_exception, mock_logger_info): # 2. 
None returned, expect VolumeNotFoundException with self.assertRaises(VolumeNotFoundException): self.openstack_connector.get_volume_snapshot(snapshot_id) - mock_logger_exception.assert_called_with(f"No volume Snapshot with id {snapshot_id}") + mock_logger_exception.assert_called_with( + f"No volume Snapshot with id {snapshot_id}" + ) @patch("openstack_connector.openstack_connector.logger.info") @patch("openstack_connector.openstack_connector.logger.exception") @@ -526,28 +606,44 @@ def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): # Mock the delete_volume_snapshot method to avoid actual deletion in the test self.mock_openstack_connection.delete_volume_snapshot.side_effect = [ None, # No exception case - ResourceNotFound(message="Snapshot not found"), # SnapshotNotFoundException case - ConflictException(message="Delete snapshot failed"), # OpenStackCloudException case - OpenStackCloudException(message="Some other exception") # DefaultException case + ResourceNotFound( + message="Snapshot not found" + ), # SnapshotNotFoundException case + ConflictException( + message="Delete snapshot failed" + ), # OpenStackCloudException case + OpenStackCloudException( + message="Some other exception" + ), # DefaultException case ] # Call the delete_volume_snapshot method for different scenarios # 1. No exception self.openstack_connector.delete_volume_snapshot(snapshot_id) - mock_logger_info.assert_called_once_with(f"Delete volume Snapshot {snapshot_id}") + mock_logger_info.assert_called_once_with( + f"Delete volume Snapshot {snapshot_id}" + ) # 2. ResourceNotFound, expect SnapshotNotFoundException - with self.assertRaises(SnapshotNotFoundException): # Replace Exception with the actual exception type + with self.assertRaises( + SnapshotNotFoundException + ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) mock_logger_exception.assert_called_with(f"Snapshot not found: {snapshot_id}") # 3. 
ConflictException, expect OpenStackCloudException - with self.assertRaises(OpenStackCloudException): # Replace Exception with the actual exception type + with self.assertRaises( + OpenStackCloudException + ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) - mock_logger_exception.assert_called_with(f"Delete volume snapshot: {snapshot_id}) failed!") + mock_logger_exception.assert_called_with( + f"Delete volume snapshot: {snapshot_id}) failed!" + ) # 4. OpenStackCloudException, expect DefaultException - with self.assertRaises(DefaultException): # Replace Exception with the actual exception type + with self.assertRaises( + DefaultException + ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) @patch("openstack_connector.openstack_connector.logger.info") @@ -568,7 +664,9 @@ def test_get_servers(self, mock_logger_info): @patch("openstack_connector.openstack_connector.logger.error") @patch("openstack_connector.openstack_connector.logger.exception") @patch("openstack_connector.openstack_connector.logger.info") - def test_get_servers_by_ids(self, mock_logger_info, mock_logger_exception, mock_logger_error): + def test_get_servers_by_ids( + self, mock_logger_info, mock_logger_exception, mock_logger_error + ): # Prepare test data server_ids = ["id1", "id2", "id3", "id4"] expected_servers = [Server(id="id1"), Server(id="id2")] @@ -598,7 +696,9 @@ def test_get_servers_by_ids(self, mock_logger_info, mock_logger_exception, mock_ @patch("openstack_connector.openstack_connector.logger.info") def test_attach_volume_to_server(self, mock_logger_info, mock_logger_exception): # Prepare test data - expected_attachment = {"device": "/dev/vdb"} # Replace with actual attachment details + expected_attachment = { + "device": "/dev/vdb" + } # Replace with actual attachment details expected_server = fakes.generate_fake_resource(server.Server) expected_volume = 
fakes.generate_fake_resource(volume.Volume) openstack_id = expected_server.id @@ -606,21 +706,37 @@ def test_attach_volume_to_server(self, mock_logger_info, mock_logger_exception): # Mock the get_server and get_volume methods self.mock_openstack_connection.attach_volume.return_value = expected_attachment - self.mock_openstack_connection.get_server_by_id.return_value = expected_server # Replace with actual Server instance - self.mock_openstack_connection.get_volume.return_value = expected_volume # Replace with actual Volume instance + self.mock_openstack_connection.get_server_by_id.return_value = ( + expected_server # Replace with actual Server instance + ) + self.mock_openstack_connection.get_volume.return_value = ( + expected_volume # Replace with actual Volume instance + ) # Call the attach_volume_to_server method - result_attachment = self.openstack_connector.attach_volume_to_server(openstack_id, volume_id) + result_attachment = self.openstack_connector.attach_volume_to_server( + openstack_id, volume_id + ) # Assertions self.assertEqual(result_attachment, expected_attachment) - mock_logger_info.assert_called_with(f"Attaching volume {volume_id} to virtualmachine {openstack_id}") - self.mock_openstack_connection.get_server_by_id.assert_called_once_with(id=openstack_id) - self.mock_openstack_connection.get_volume.assert_called_once_with(name_or_id=volume_id) - self.mock_openstack_connection.attach_volume.assert_called_once_with(server=expected_server, volume=expected_volume) + mock_logger_info.assert_called_with( + f"Attaching volume {volume_id} to virtualmachine {openstack_id}" + ) + self.mock_openstack_connection.get_server_by_id.assert_called_once_with( + id=openstack_id + ) + self.mock_openstack_connection.get_volume.assert_called_once_with( + name_or_id=volume_id + ) + self.mock_openstack_connection.attach_volume.assert_called_once_with( + server=expected_server, volume=expected_volume + ) # Test exception case - 
self.mock_openstack_connection.attach_volume.side_effect = ConflictException(message="Conflict error") + self.mock_openstack_connection.attach_volume.side_effect = ConflictException( + message="Conflict error" + ) with self.assertRaises(OpenStackConflictException): self.openstack_connector.attach_volume_to_server(openstack_id, volume_id) mock_logger_exception.assert_called_once_with( @@ -638,19 +754,33 @@ def test_detach_volume(self, mock_logger_info, mock_logger_exception): volume_id = expected_volume.id # Mock the get_volume, get_server, and detach_volume methods - self.mock_openstack_connection.get_volume.return_value = expected_volume # Replace with actual Volume instance - self.mock_openstack_connection.get_server_by_id.return_value = expected_server # Replace with actual Server instance + self.mock_openstack_connection.get_volume.return_value = ( + expected_volume # Replace with actual Volume instance + ) + self.mock_openstack_connection.get_server_by_id.return_value = ( + expected_server # Replace with actual Server instance + ) # Call the detach_volume method self.openstack_connector.detach_volume(volume_id, server_id) # Assertions - mock_logger_info.assert_any_call(f"Delete Volume Attachment {volume_id} - {server_id}") - self.mock_openstack_connection.get_volume.assert_called_once_with(name_or_id=volume_id) - self.mock_openstack_connection.get_server_by_id.assert_called_once_with(id=server_id) - self.mock_openstack_connection.detach_volume.assert_called_once_with(volume=expected_volume, server=expected_server) + mock_logger_info.assert_any_call( + f"Delete Volume Attachment {volume_id} - {server_id}" + ) + self.mock_openstack_connection.get_volume.assert_called_once_with( + name_or_id=volume_id + ) + self.mock_openstack_connection.get_server_by_id.assert_called_once_with( + id=server_id + ) + self.mock_openstack_connection.detach_volume.assert_called_once_with( + volume=expected_volume, server=expected_server + ) # Test exception case - 
self.mock_openstack_connection.detach_volume.side_effect = ConflictException(message="Conflict error") + self.mock_openstack_connection.detach_volume.side_effect = ConflictException( + message="Conflict error" + ) with self.assertRaises(OpenStackConflictException): self.openstack_connector.detach_volume(volume_id, server_id) mock_logger_exception.assert_called_once_with( @@ -666,16 +796,19 @@ def test_resize_volume(self, mock_logger_info, mock_logger_exception): size = 100 # Mock the extend_volume method - self.mock_openstack_connection.block_storage.extend_volume.side_effect = [None, # No exception case - ResourceNotFound(message="Volume not found"), - # VolumeNotFoundException case - OpenStackCloudException( - message="Resize error")] # DefaultException case + self.mock_openstack_connection.block_storage.extend_volume.side_effect = [ + None, # No exception case + ResourceNotFound(message="Volume not found"), + # VolumeNotFoundException case + OpenStackCloudException(message="Resize error"), + ] # DefaultException case # Call the resize_volume method for different scenarios # 1. No exception self.openstack_connector.resize_volume(volume_id, size) - mock_logger_info.assert_called_once_with(f"Extend volume {volume_id} to size {size}") + mock_logger_info.assert_called_once_with( + f"Extend volume {volume_id} to size {size}" + ) # 2. ResourceNotFound, expect VolumeNotFoundException with self.assertRaises(VolumeNotFoundException): @@ -696,30 +829,42 @@ def test_create_volume(self, mock_logger_info, mock_logger_exception): # Mock the create_volume method self.mock_openstack_connection.block_storage.create_volume.side_effect = [ Volume(id="volume_id"), # Successful case - ResourceFailure(message="Volume creation failed"), # ResourceNotAvailableException case + ResourceFailure( + message="Volume creation failed" + ), # ResourceNotAvailableException case ] # Call the create_volume method for different scenarios # 1. 
Successful case - result_volume = self.openstack_connector.create_volume(volume_name, volume_storage, metadata) - mock_logger_info.assert_called_once_with(f"Creating volume with {volume_storage} GB storage") + result_volume = self.openstack_connector.create_volume( + volume_name, volume_storage, metadata + ) + mock_logger_info.assert_called_once_with( + f"Creating volume with {volume_storage} GB storage" + ) self.assertIsInstance(result_volume, Volume) # 2. ResourceFailure, expect ResourceNotAvailableException with self.assertRaises(ResourceNotAvailableException): - self.openstack_connector.create_volume(volume_name, volume_storage, metadata) - mock_logger_exception.assert_called_once_with(f"Trying to create volume with {volume_storage} GB failed", exc_info=True) + self.openstack_connector.create_volume( + volume_name, volume_storage, metadata + ) + mock_logger_exception.assert_called_once_with( + f"Trying to create volume with {volume_storage} GB failed", exc_info=True + ) @patch("openstack_connector.openstack_connector.logger.exception") def test_get_network(self, mock_logger_exception): - with tempfile.NamedTemporaryFile(mode='w+', delete=False) as temp_file: + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: temp_file.write(CONFIG_DATA) # Call the load_config_yml method with the temporary file path self.openstack_connector.load_config_yml(temp_file.name) # Mock the find_network method - self.mock_openstack_connection.network.find_network.return_value = Network(id="my_network") + self.mock_openstack_connection.network.find_network.return_value = Network( + id="my_network" + ) # Call the get_network method result_network = self.openstack_connector.get_network() @@ -727,7 +872,9 @@ def test_get_network(self, mock_logger_exception): # Assertions self.assertIsInstance(result_network, Network) self.assertEqual(result_network.id, "my_network") - 
self.mock_openstack_connection.network.find_network.assert_called_once_with(self.openstack_connector.NETWORK) + self.mock_openstack_connection.network.find_network.assert_called_once_with( + self.openstack_connector.NETWORK + ) mock_logger_exception.assert_not_called() # Ensure no exception is logged @patch("openstack_connector.openstack_connector.logger.exception") @@ -739,11 +886,15 @@ def test_import_existing_keypair(self, mock_logger_info, mock_logger_exception): self.mock_openstack_connection.get_keypair.return_value = existing_keypair # Call the import_keypair method for an existing keypair - result_keypair = self.openstack_connector.import_keypair(keyname=existing_keypair.name, public_key=existing_keypair.public_key) + result_keypair = self.openstack_connector.import_keypair( + keyname=existing_keypair.name, public_key=existing_keypair.public_key + ) # Assertions for existing keypair self.assertEqual(result_keypair, existing_keypair) - self.mock_openstack_connection.get_keypair.assert_called_once_with(name_or_id=existing_keypair.name) + self.mock_openstack_connection.get_keypair.assert_called_once_with( + name_or_id=existing_keypair.name + ) mock_logger_info.assert_called_once_with(f"Get Keypair {existing_keypair.name}") self.mock_openstack_connection.create_keypair.assert_not_called() self.mock_openstack_connection.delete_keypair.assert_not_called() @@ -759,13 +910,19 @@ def test_import_non_existing_keypair(self, mock_logger_info, mock_logger_excepti self.mock_openstack_connection.create_keypair.return_value = new_keypair # Call the import_keypair method for a new keypair - result_keypair = self.openstack_connector.import_keypair(keyname=new_keypair.name, public_key=new_keypair.public_key) + result_keypair = self.openstack_connector.import_keypair( + keyname=new_keypair.name, public_key=new_keypair.public_key + ) # Assertions for new keypair self.assertEqual(result_keypair, new_keypair) - 
self.mock_openstack_connection.get_keypair.assert_called_with(name_or_id=new_keypair.name) - self.mock_openstack_connection.create_keypair.assert_called_once_with(name=new_keypair.name, public_key=new_keypair.public_key) + self.mock_openstack_connection.get_keypair.assert_called_with( + name_or_id=new_keypair.name + ) + self.mock_openstack_connection.create_keypair.assert_called_once_with( + name=new_keypair.name, public_key=new_keypair.public_key + ) mock_logger_info.assert_called_with(f"Create Keypair {new_keypair.name}") self.mock_openstack_connection.delete_keypair.assert_not_called() mock_logger_exception.assert_not_called() @@ -782,16 +939,25 @@ def test_import_changed_keypair(self, mock_logger_info, mock_logger_exception): self.mock_openstack_connection.create_keypair.return_value = changed_keypair # Call the import_keypair method for a keypair with changed public_key - result_keypair = self.openstack_connector.import_keypair(keyname=changed_keypair.name, public_key=changed_keypair.public_key) + result_keypair = self.openstack_connector.import_keypair( + keyname=changed_keypair.name, public_key=changed_keypair.public_key + ) # Assertions for keypair with changed public_key self.assertEqual(result_keypair, changed_keypair) - self.mock_openstack_connection.get_keypair.assert_called_with(name_or_id=changed_keypair.name) - self.mock_openstack_connection.create_keypair.assert_called_once_with(name=changed_keypair.name, - public_key=changed_keypair.public_key) - self.mock_openstack_connection.delete_keypair.assert_called_once_with(name=changed_keypair.name) + self.mock_openstack_connection.get_keypair.assert_called_with( + name_or_id=changed_keypair.name + ) + self.mock_openstack_connection.create_keypair.assert_called_once_with( + name=changed_keypair.name, public_key=changed_keypair.public_key + ) + self.mock_openstack_connection.delete_keypair.assert_called_once_with( + name=changed_keypair.name + ) mock_logger_info.assert_any_call(f"Delete keypair: 
{changed_keypair.name}") - mock_logger_info.assert_any_call(f"Key {changed_keypair.name} has changed. Replace old Key") + mock_logger_info.assert_any_call( + f"Key {changed_keypair.name} has changed. Replace old Key" + ) mock_logger_exception.assert_not_called() @patch("openstack_connector.openstack_connector.logger.exception") @@ -803,11 +969,15 @@ def test_import_same_keypair(self, mock_logger_info, mock_logger_exception): self.mock_openstack_connection.get_keypair.return_value = same_keypair # Call the import_keypair method for a keypair with same public_key - result_keypair = self.openstack_connector.import_keypair(keyname=same_keypair.name, public_key=same_keypair.public_key) + result_keypair = self.openstack_connector.import_keypair( + keyname=same_keypair.name, public_key=same_keypair.public_key + ) # Assertions for keypair with same public_key self.assertEqual(result_keypair, same_keypair) - self.mock_openstack_connection.get_keypair.assert_called_with(name_or_id=same_keypair.name) + self.mock_openstack_connection.get_keypair.assert_called_with( + name_or_id=same_keypair.name + ) self.mock_openstack_connection.create_keypair.assert_not_called() self.mock_openstack_connection.delete_keypair.assert_not_called() mock_logger_info.assert_called_with(f"Get Keypair {same_keypair.name}") @@ -820,11 +990,12 @@ def test_create_add_keys_script(self, mock_logger_info): # Call the create_add_keys_script method result_script = self.openstack_connector.create_add_keys_script(keys) - print(result_script) # Assertions expected_script_content = '#!/bin/bash\ndeclare -a keys_to_add=("key1" "key2" "key3" )\necho "Found keys: ${#keys_to_add[*]}"\nfor ix in ${!keys_to_add[*]}\ndo\n printf "\\n%s" "${keys_to_add[$ix]}" >> /home/ubuntu/.ssh/authorized_keys\n\ndone\n' - expected_script_content=encodeutils.safe_encode(expected_script_content.encode("utf-8")) + expected_script_content = encodeutils.safe_encode( + expected_script_content.encode("utf-8") + ) # Additional assertions 
mock_logger_info.assert_called_once_with("create add key script") @@ -851,25 +1022,27 @@ def test_netcat(self, mock_logger_info, mock_socket): mock_socket.assert_called_once_with(socket.AF_INET, socket.SOCK_STREAM) mock_socket.return_value.settimeout.assert_called_once_with(5) mock_socket.return_value.connect_ex.assert_called_once_with((host, port)) - mock_logger_info.assert_any_call(f"Checking SSH Connection {host}:{port} Result = 0") + mock_logger_info.assert_any_call( + f"Checking SSH Connection {host}:{port} Result = 0" + ) @patch("openstack_connector.openstack_connector.logger.info") def test_get_flavor(self, mock_logger_info): # Replace with the actual flavor name or ID - expected_flavor= fakes.generate_fake_resource(flavor.Flavor) - + expected_flavor = fakes.generate_fake_resource(flavor.Flavor) # Mock the get_flavor method to simulate fetching a flavor self.mock_openstack_connection.get_flavor.return_value = expected_flavor - # Call the get_flavor method result_flavor = self.openstack_connector.get_flavor(expected_flavor.name) # Assertions self.assertEqual(result_flavor, expected_flavor) mock_logger_info.assert_called_with(f"Get flavor {expected_flavor.name}") - self.mock_openstack_connection.get_flavor.assert_called_once_with(name_or_id=expected_flavor.name, get_extra=True) + self.mock_openstack_connection.get_flavor.assert_called_once_with( + name_or_id=expected_flavor.name, get_extra=True + ) @mock.patch("openstack_connector.openstack_connector.logger.info") def test_get_flavors(self, mock_logger_info): @@ -887,7 +1060,9 @@ def test_get_flavors(self, mock_logger_info): mock_logger_info.assert_any_call("Get Flavors") mock_logger_info.assert_any_call([flav["name"] for flav in expected_flavors]) - self.mock_openstack_connection.list_flavors.assert_called_once_with(get_extra=True) + self.mock_openstack_connection.list_flavors.assert_called_once_with( + get_extra=True + ) @mock.patch("openstack_connector.openstack_connector.logger.info") def 
test_get_servers_by_bibigrid_id(self, mock_logger_info): @@ -901,12 +1076,18 @@ def test_get_servers_by_bibigrid_id(self, mock_logger_info): self.mock_openstack_connection.list_servers.return_value = expected_servers # Call the get_servers_by_bibigrid_id method - result_servers = self.openstack_connector.get_servers_by_bibigrid_id(bibigrid_id) + result_servers = self.openstack_connector.get_servers_by_bibigrid_id( + bibigrid_id + ) # Assertions self.assertEqual(result_servers, expected_servers) - mock_logger_info.assert_called_with(f"Get Servery by Bibigrid id: {bibigrid_id}") - self.mock_openstack_connection.list_servers.assert_called_once_with(filters={"bibigrid_id": bibigrid_id, "name": bibigrid_id}) + mock_logger_info.assert_called_with( + f"Get Servery by Bibigrid id: {bibigrid_id}" + ) + self.mock_openstack_connection.list_servers.assert_called_once_with( + filters={"bibigrid_id": bibigrid_id, "name": bibigrid_id} + ) @mock.patch("openstack_connector.openstack_connector.logger.exception") @mock.patch("openstack_connector.openstack_connector.logger.info") @@ -917,7 +1098,7 @@ def test_create_snapshot(self, mock_logger_info, mock_logger_exception): username = "your_username" base_tags = ["tag1", "tag2"] description = "your_description" - new_snapshot=fakes.generate_fake_resource(image.Image) + new_snapshot = fakes.generate_fake_resource(image.Image) # Mock the create_image_snapshot and image.add_tag methods self.mock_openstack_connection.create_image_snapshot.return_value = new_snapshot @@ -930,15 +1111,21 @@ def test_create_snapshot(self, mock_logger_info, mock_logger_exception): self.assertEqual(result_snapshot_id, new_snapshot.id) # Case 2: ConflictException - self.mock_openstack_connection.create_image_snapshot.side_effect = ConflictException(message="Conflict") + self.mock_openstack_connection.create_image_snapshot.side_effect = ( + ConflictException(message="Conflict") + ) with self.assertRaises(OpenStackConflictException): 
self.openstack_connector.create_snapshot( openstack_id, name, username, base_tags, description ) - mock_logger_exception.assert_called_once_with("Create snapshot your_openstack_id failed!") + mock_logger_exception.assert_called_once_with( + "Create snapshot your_openstack_id failed!" + ) # Case 3: OpenStackCloudException - self.mock_openstack_connection.create_image_snapshot.side_effect = OpenStackCloudException(message="Cloud Exception") + self.mock_openstack_connection.create_image_snapshot.side_effect = ( + OpenStackCloudException(message="Cloud Exception") + ) with self.assertRaises(DefaultException): self.openstack_connector.create_snapshot( openstack_id, name, username, base_tags, description @@ -948,7 +1135,7 @@ def test_create_snapshot(self, mock_logger_info, mock_logger_exception): @mock.patch("openstack_connector.openstack_connector.logger.info") def test_delete_image(self, mock_logger_info, mock_logger_exception): # Replace with the actual image_id you want to test - fake_image=fakes.generate_fake_resource(image.Image) + fake_image = fakes.generate_fake_resource(image.Image) # Mock the get_image and compute.delete_image methods self.mock_openstack_connection.get_image.return_value = fake_image @@ -957,13 +1144,34 @@ def test_delete_image(self, mock_logger_info, mock_logger_exception): # Case 1: No exception self.openstack_connector.delete_image(fake_image.id) mock_logger_info.assert_any_call(f"Delete Image {fake_image.id}") - self.mock_openstack_connection.compute.delete_image.assert_called_once_with(fake_image.id) + self.mock_openstack_connection.compute.delete_image.assert_called_once_with( + fake_image.id + ) # Case 2: Other exceptions self.mock_openstack_connection.get_image.side_effect = Exception("Some error") with self.assertRaises(DefaultException): self.openstack_connector.delete_image(fake_image.id) - mock_logger_exception.assert_called_once_with(f"Delete Image {fake_image.id} failed!") + mock_logger_exception.assert_called_once_with( + 
f"Delete Image {fake_image.id} failed!" + ) + + @mock.patch("openstack_connector.openstack_connector.logger.info") + def test_get_public_images(self, mock_logger_info): + # Replace with the actual public images you want to test + images = list(fakes.generate_fake_resources(image.Image, count=3)) + images[2].visibility = "private" + + # Mock the image.images() method with filters and extra_info + self.mock_openstack_connection.image.images.return_value = images[:2] + + # Call the get_public_images method + result_images = self.openstack_connector.get_public_images() + + # Assertions + self.assertEqual(result_images, images[:2]) # Exclude the private image + mock_logger_info.assert_any_call("Get public images") + if __name__ == "__main__": unittest.main() From 217a54a07e4e6e47dd4d0763f4bf9e314e01b88c Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 19 Dec 2023 17:55:40 +0100 Subject: [PATCH 13/39] fixed imports --- Dockerfile | 25 ++- simple_vm_client/__init__.py | 1 - .../template/__init__.py} | 0 .../forc_connector/template/template.py | 6 +- .../__pycache__/__init__.cpython-38.pyc | Bin 191 -> 0 bytes .../openstack_connector.py | 183 +++++++++--------- simple_vm_client/test_openstack_connector.py | 121 ++++++------ 7 files changed, 177 insertions(+), 159 deletions(-) rename simple_vm_client/{scripts/__init__.py,cover => forc_connector/template/__init__.py} (100%) delete mode 100644 simple_vm_client/openstack_connector/__pycache__/__init__.cpython-38.pyc diff --git a/Dockerfile b/Dockerfile index a1b9141..85eb54e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -1,11 +1,24 @@ FROM python:3.11.4-buster -RUN apt-get update -y -RUN apt-get install -y build-essential + +RUN apt-get update -y \ + && apt-get install -y build-essential \ + && apt-get clean \ + && rm -rf /var/lib/apt/lists/* + WORKDIR /code -ADD requirements.txt /code + +# Copy requirements and install them first to leverage Docker cache +COPY requirements.txt /code RUN pip install -r requirements.txt -ADD 
requirements.yml /code -ADD ansible.cfg /etc/ansible/ + +COPY requirements.yml /code +COPY ansible.cfg /etc/ansible/ RUN ansible-galaxy install -r requirements.yml -ADD . /code + +# Copy the entire project +COPY . /code + +# Set PYTHONPATH to include the project root +ENV PYTHONPATH /code + WORKDIR /code/simple_vm_client diff --git a/simple_vm_client/__init__.py b/simple_vm_client/__init__.py index 86a570c..e69de29 100644 --- a/simple_vm_client/__init__.py +++ b/simple_vm_client/__init__.py @@ -1 +0,0 @@ -__all__ = ['ttypes', 'constants', 'VirtualMachineService'] diff --git a/simple_vm_client/scripts/__init__.py,cover b/simple_vm_client/forc_connector/template/__init__.py similarity index 100% rename from simple_vm_client/scripts/__init__.py,cover rename to simple_vm_client/forc_connector/template/__init__.py diff --git a/simple_vm_client/forc_connector/template/template.py b/simple_vm_client/forc_connector/template/template.py index 32d3adc..75d24de 100644 --- a/simple_vm_client/forc_connector/template/template.py +++ b/simple_vm_client/forc_connector/template/template.py @@ -7,8 +7,9 @@ import requests import yaml -from ttypes import ResearchEnvironmentTemplate -from util.logger import setup_custom_logger + +from simple_vm_client.ttypes import ResearchEnvironmentTemplate +from simple_vm_client.util.logger import setup_custom_logger # from resenv.backend.Backend import Backend @@ -116,7 +117,6 @@ def update_playbooks(self) -> None: for template_metadata in templates_metadata: try: if template_metadata.get(NEEDS_FORC_SUPPORT, False): - metadata = ResearchEnvironmentMetadata( name=template_metadata[TEMPLATE_NAME], port=template_metadata[PORT], diff --git a/simple_vm_client/openstack_connector/__pycache__/__init__.cpython-38.pyc b/simple_vm_client/openstack_connector/__pycache__/__init__.cpython-38.pyc deleted file mode 100644 index d8194106905d628ca6a2e3e81342e865ba24462a..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 191 
zcmWIL<>g`kg5C=ZNg(<$h(HF6K#l_t7qb9~6oz01O-8?!3`HPe1o10HKO;XkRX-)M zEHg#FJijQrxF9h(RX;f=Gc~UyzRU>7$}dgPEyyn_NzBoO2 None: sys.exit(1) self.USE_APPLICATION_CREDENTIALS = ( - os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true" + os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true" ) if self.USE_APPLICATION_CREDENTIALS: @@ -180,15 +183,15 @@ def load_env_config(self) -> None: self.PROJECT_DOMAIN_ID = os.environ["OS_PROJECT_DOMAIN_ID"] def create_server( - self, - name: str, - image_id: str, - flavor_id: str, - network_id: str, - userdata: str, - key_name: str, - metadata: dict[str, str], - security_groups: list[str], + self, + name: str, + image_id: str, + flavor_id: str, + network_id: str, + userdata: str, + key_name: str, + metadata: dict[str, str], + security_groups: list[str], ) -> Server: logger.info( f"Create Server:\n\tname: {name}\n\timage_id:{image_id}\n\tflavor_id:{flavor_id}\n\tmetadata:{metadata}" @@ -230,7 +233,7 @@ def delete_volume(self, volume_id: str) -> None: raise DefaultException(message=e.message) def create_volume_snapshot( - self, volume_id: str, name: str, description: str + self, volume_id: str, name: str, description: str ) -> str: try: logger.info(f"Create Snapshot for Volume {volume_id}") @@ -273,7 +276,7 @@ def delete_volume_snapshot(self, snapshot_id: str) -> None: raise DefaultException(message=e.message) def create_volume_by_source_volume( - self, volume_name: str, metadata: dict[str, str], source_volume_id: str + self, volume_name: str, metadata: dict[str, str], source_volume_id: str ) -> Volume: logger.info(f"Creating volume from source volume with id {source_volume_id}") try: @@ -289,7 +292,7 @@ def create_volume_by_source_volume( raise ResourceNotAvailableException(message=e.message) def create_volume_by_volume_snap( - self, volume_name: str, metadata: dict[str, str], volume_snap_id: str + self, volume_name: str, metadata: dict[str, str], volume_snap_id: str ) -> Volume: logger.info(f"Creating 
volume from volume snapshot with id {volume_snap_id}") try: @@ -327,7 +330,7 @@ def get_servers_by_ids(self, ids: list[str]) -> list[Server]: return servers def attach_volume_to_server( - self, openstack_id: str, volume_id: str + self, openstack_id: str, volume_id: str ) -> dict[str, str]: server = self.get_server(openstack_id=openstack_id) volume = self.get_volume(name_or_id=volume_id) @@ -368,7 +371,7 @@ def resize_volume(self, volume_id: str, size: int) -> None: raise DefaultException(message=str(e)) def create_volume( - self, volume_name: str, volume_storage: int, metadata: dict[str, str] + self, volume_name: str, volume_storage: int, metadata: dict[str, str] ) -> Volume: logger.info(f"Creating volume with {volume_storage} GB storage") try: @@ -485,9 +488,9 @@ def get_active_image_by_os_version(self, os_version: str, os_distro: str) -> Ima image_os_distro = metadata.get("os_distro", None) base_image_ref = metadata.get("base_image_ref", None) if ( - os_version == image_os_version - and image.status == "active" - and base_image_ref is None + os_version == image_os_version + and image.status == "active" + and base_image_ref is None ): if os_distro and os_distro == image_os_distro: return image @@ -499,11 +502,11 @@ def get_active_image_by_os_version(self, os_version: str, os_distro: str) -> Ima ) def get_image( - self, - name_or_id: str, - replace_inactive: bool = False, - ignore_not_active: bool = False, - ignore_not_found: bool = False, + self, + name_or_id: str, + replace_inactive: bool = False, + ignore_not_active: bool = False, + ignore_not_found: bool = False, ) -> Image: logger.info(f"Get Image {name_or_id}") @@ -527,12 +530,12 @@ def get_image( return image def create_snapshot( - self, - openstack_id: str, - name: str, - username: str, - base_tags: list[str], - description: str, + self, + openstack_id: str, + name: str, + username: str, + base_tags: list[str], + description: str, ) -> str: logger.info( f"Create Snapshot from Instance {openstack_id} with 
name {name} for {username}" @@ -644,9 +647,9 @@ def get_gateway_ip(self) -> dict[str, str]: return {"gateway_ip": self.GATEWAY_IP} def create_mount_init_script( - self, - new_volumes: list[dict[str, str]] = None, # type: ignore - attach_volumes: list[dict[str, str]] = None, # type: ignore + self, + new_volumes: list[dict[str, str]] = None, # type: ignore + attach_volumes: list[dict[str, str]] = None, # type: ignore ) -> str: logger.info(f"Create init script for volume ids:{new_volumes}") if not new_volumes and not attach_volumes: @@ -727,7 +730,7 @@ def delete_security_group_rule(self, openstack_id): ) def open_port_range_for_vm_in_project( - self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" + self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" ): server: Server = self.openstack_connection.get_server_by_id(id=openstack_id) if server is None: @@ -776,13 +779,13 @@ def open_port_range_for_vm_in_project( raise OpenStackConflictException(message=e.message) def create_security_group( - self, - name: str, - udp_port: int = None, # type: ignore - ssh: bool = True, - udp: bool = False, - description: str = "", - research_environment_metadata: ResearchEnvironmentMetadata = None, + self, + name: str, + udp_port: int = None, # type: ignore + ssh: bool = True, + udp: bool = False, + description: str = "", + research_environment_metadata: ResearchEnvironmentMetadata = None, ) -> SecurityGroup: logger.info(f"Create new security group {name}") sec: SecurityGroup = self.openstack_connection.get_security_group( @@ -889,7 +892,7 @@ def is_security_group_in_use(self, security_group_id): return False def get_or_create_research_environment_security_group( - self, resenv_metadata: ResearchEnvironmentMetadata + self, resenv_metadata: ResearchEnvironmentMetadata ): if not resenv_metadata.needs_forc_support: return None @@ -1123,9 +1126,9 @@ def delete_server(self, openstack_id: str) -> None: server=server, security_group=sec ) if ( - 
sg["name"] != self.DEFAULT_SECURITY_GROUP_NAME - and ("bibigrid" not in sec.name or "master" not in server.name) - and not self.is_security_group_in_use(security_group_id=sec.id) + sg["name"] != self.DEFAULT_SECURITY_GROUP_NAME + and ("bibigrid" not in sec.name or "master" not in server.name) + and not self.is_security_group_in_use(security_group_id=sec.id) ): self.openstack_connection.delete_security_group(sg) self.openstack_connection.compute.delete_server(server.id, force=True) @@ -1162,12 +1165,11 @@ def get_vm_ports(self, openstack_id: str) -> dict[str, str]: return {"port": str(ssh_port), "udp": str(udp_port)} def create_userdata( - self, - volume_ids_path_new: list[dict[str, str]], - volume_ids_path_attach: list[dict[str, str]], - additional_keys: list[str], + self, + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_keys: list[str], ) -> str: - unlock_ubuntu_user_script = "#!/bin/bash\npasswd -u ubuntu\n" unlock_ubuntu_user_script_encoded = encodeutils.safe_encode( unlock_ubuntu_user_script.encode("utf-8") @@ -1177,9 +1179,9 @@ def create_userdata( if additional_keys: add_key_script = self.create_add_keys_script(keys=additional_keys) init_script = ( - add_key_script - + encodeutils.safe_encode("\n".encode("utf-8")) - + init_script + add_key_script + + encodeutils.safe_encode("\n".encode("utf-8")) + + init_script ) if volume_ids_path_new or volume_ids_path_attach: mount_script = self.create_mount_init_script( @@ -1187,26 +1189,25 @@ def create_userdata( attach_volumes=volume_ids_path_attach, ) init_script = ( - init_script - + encodeutils.safe_encode("\n".encode("utf-8")) - + mount_script - + init_script + + encodeutils.safe_encode("\n".encode("utf-8")) + + mount_script ) return init_script def start_server( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - public_key: str, - research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, - 
volume_ids_path_new: Union[list[dict[str, str]], None] = None, - volume_ids_path_attach: Union[list[dict[str, str]], None] = None, - additional_keys: Union[list[str], None] = None, - additional_security_group_ids: Union[list[str], None] = None, + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + public_key: str, + research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, + volume_ids_path_new: Union[list[dict[str, str]], None] = None, + volume_ids_path_attach: Union[list[dict[str, str]], None] = None, + additional_keys: Union[list[str], None] = None, + additional_security_group_ids: Union[list[str], None] = None, ) -> str: logger.info(f"Start Server {servername}") @@ -1288,16 +1289,16 @@ def start_server( raise DefaultException(message=str(e)) def start_server_with_playbook( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - research_environment_metadata: ResearchEnvironmentMetadata, - volume_ids_path_new: list[dict[str, str]] = None, # type: ignore - volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore - additional_keys: list[str] = None, # type: ignore - additional_security_group_ids=None, # type: ignore + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + research_environment_metadata: ResearchEnvironmentMetadata, + volume_ids_path_new: list[dict[str, str]] = None, # type: ignore + volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore + additional_keys: list[str] = None, # type: ignore + additional_security_group_ids=None, # type: ignore ) -> tuple[str, str]: logger.info(f"Start Server {servername}") @@ -1433,16 +1434,16 @@ def add_udp_security_group(self, server_id): return def add_cluster_machine( - self, - cluster_id: str, - cluster_user: str, - cluster_group_id: list[str], - image_name: str, - flavor_name: str, - name: str, - key_name: str, - batch_idx: int, - worker_idx: int, + 
self, + cluster_id: str, + cluster_user: str, + cluster_group_id: list[str], + image_name: str, + flavor_name: str, + name: str, + key_name: str, + batch_idx: int, + worker_idx: int, ) -> str: logger.info(f"Add machine to {cluster_id}") image: Image = self.get_image(name_or_id=image_name, replace_inactive=True) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index c731cc4..a1af8f8 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -16,9 +16,10 @@ from openstack.image.v2 import image as image_module from openstack.network.v2.network import Network from openstack.test import fakes -from openstack_connector.openstack_connector import OpenStackConnector from oslo_utils import encodeutils -from ttypes import ( + +from .openstack_connector.openstack_connector import OpenStackConnector +from .ttypes import ( DefaultException, ImageNotFoundException, OpenStackConflictException, @@ -139,7 +140,7 @@ def test_load_config_yml(self): self.openstack_connector.FORC_SECURITY_GROUP_ID, "forc_security_group_id" ) - @patch("openstack_connector.openstack_connector.logger") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger") def test_load_env_config_username_password(self, mock_logger): openstack_connector = self.init_openstack_connector() @@ -159,7 +160,7 @@ def test_load_env_config_username_password(self, mock_logger): # Assert that logger.info was called with the expected message mock_logger.info.assert_called_once_with("Load environment config: OpenStack") - @patch("openstack_connector.openstack_connector.logger") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger") @patch.dict( os.environ, { @@ -191,7 +192,7 @@ def test_load_env_config_application_credentials(self, mock_logger): ] mock_logger.info.assert_has_calls(expected_calls, any_order=False) - @patch("openstack_connector.openstack_connector.logger") + 
@patch("simple_vm_client.openstack_connector.openstack_connector.logger") @patch.dict(os.environ, {"OS_AUTH_URL": ""}) def test_load_env_config_missing_os_auth_url(self, mock_logger): openstack_connector = self.init_openstack_connector() @@ -206,7 +207,7 @@ def test_load_env_config_missing_os_auth_url(self, mock_logger): # Assert that sys.exit was called with status code 1 mock_exit.assert_called_once_with(1) - @patch("openstack_connector.openstack_connector.logger") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger") @patch.dict(os.environ, {"USE_APPLICATION_CREDENTIALS": "True"}) def test_load_env_config_missing_app_cred_vars(self, mock_logger): # Create an instance of OpenStackConnector @@ -224,7 +225,7 @@ def test_load_env_config_missing_app_cred_vars(self, mock_logger): # Assert that sys.exit was called with status code 1 mock_exit.assert_called_once_with(1) - @patch("openstack_connector.openstack_connector.logger") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger") @patch.dict( os.environ, { @@ -274,14 +275,14 @@ def test_get_default_security_groups(self): default_security_groups, self.openstack_connector.DEFAULT_SECURITY_GROUPS ) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_image(self, mock_logger_info): self.mock_openstack_connection.get_image.return_value = EXPECTED_IMAGE result = self.openstack_connector.get_image(EXPECTED_IMAGE.id) mock_logger_info.assert_called_once_with(f"Get Image {EXPECTED_IMAGE.id}") self.assertEqual(result, EXPECTED_IMAGE) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_image_not_found_exception(self, mock_logger_info): # Configure the mock_openstack_connection.get_image to return None self.mock_openstack_connection.get_image.return_value = None @@ -295,7 +296,7 @@ 
def test_get_image_not_found_exception(self, mock_logger_info): # Assert that the exception contains the expected message and image ID self.assertEqual(context.exception.message, "Image nonexistent_id not found!") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_image_not_active_exception(self, mock_logger_info): # Configure the mock_openstack_connection.get_image to return the not active image self.mock_openstack_connection.get_image.return_value = INACTIVE_IMAGE @@ -313,7 +314,7 @@ def test_get_image_not_active_exception(self, mock_logger_info): f"Image {INACTIVE_IMAGE.name} found but not active!", ) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_images(self, mock_logger_info): # Configure the mock_openstack_connection.image.images to return the fake images self.mock_openstack_connection.image.images.return_value = IMAGES @@ -328,7 +329,7 @@ def test_get_images(self, mock_logger_info): # Assert that the method returns the expected result self.assertEqual(result, IMAGES) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_active_image_by_os_version(self, mock_logger_info): # Generate a set of fake images with different properties os_version = "22.04" @@ -347,7 +348,7 @@ def test_get_active_image_by_os_version(self, mock_logger_info): # Assert that the method returns the expected image self.assertEqual(result, EXPECTED_IMAGE) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_active_image_by_os_version_not_found_exception(self, mock_logger_info): # Configure the mock_openstack_connection.list_images to return an empty list 
self.mock_openstack_connection.list_images.return_value = [] @@ -393,8 +394,8 @@ def test_get_limits(self): self.mock_openstack_connection.get_volume_limits.return_value = volume_limits self.openstack_connector.get_limits() - @patch("openstack_connector.openstack_connector.logger.info") - @patch("openstack_connector.openstack_connector.logger.error") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.error") def test_create_server(self, mock_logger_error, mock_logger_info): # Prepare test data name = "test_server" @@ -443,8 +444,8 @@ def test_create_server(self, mock_logger_error, mock_logger_info): # Check if the method returns the fake server object self.assertEqual(result, fake_server) - @patch("openstack_connector.openstack_connector.logger.info") - @patch("openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_get_volume(self, mock_logger_exception, mock_logger_info): # Prepare test data name_or_id = "test_volume_id" @@ -467,7 +468,7 @@ def test_get_volume(self, mock_logger_exception, mock_logger_info): # Check if the method returns the fake volume object self.assertEqual(result, fake_volume) - @patch("openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_get_volume_exception(self, mock_logger_exception): # Prepare test data name_or_id = "non_existing_volume_id" @@ -484,8 +485,8 @@ def test_get_volume_exception(self, mock_logger_exception): # Check if the method logs the correct exception information mock_logger_exception.assert_called_once_with(f"No Volume with id {name_or_id}") - @patch("openstack_connector.openstack_connector.logger.info") - 
@patch("openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_delete_volume(self, mock_logger_exception, mock_logger_info): # Prepare test data volume_id = "test_volume_id" @@ -530,8 +531,8 @@ def test_delete_volume(self, mock_logger_exception, mock_logger_info): ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) - @patch("openstack_connector.openstack_connector.logger.info") - @patch("openstack_connector.openstack_connector.logger.error") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.error") def test_create_volume_snapshot(self, mock_logger_error, mock_logger_info): # Prepare test data volume_id = "test_volume_id" @@ -572,8 +573,8 @@ def test_create_volume_snapshot(self, mock_logger_error, mock_logger_info): volume_id, snapshot_name, snapshot_description ) - @patch("openstack_connector.openstack_connector.logger.info") - @patch("openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_get_volume_snapshot(self, mock_logger_exception, mock_logger_info): # Prepare test data snapshot_id = "test_snapshot_id" @@ -597,8 +598,8 @@ def test_get_volume_snapshot(self, mock_logger_exception, mock_logger_info): f"No volume Snapshot with id {snapshot_id}" ) - @patch("openstack_connector.openstack_connector.logger.info") - @patch("openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def 
test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): # Prepare test data snapshot_id = "test_snapshot_id" @@ -646,7 +647,7 @@ def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_servers(self, mock_logger_info): # Prepare test data expected_servers = fakes.generate_fake_resources(server.Server, count=3) @@ -661,9 +662,9 @@ def test_get_servers(self, mock_logger_info): self.assertEqual(result_servers, expected_servers) mock_logger_info.assert_called_once_with("Get servers") - @patch("openstack_connector.openstack_connector.logger.error") - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.error") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_servers_by_ids( self, mock_logger_info, mock_logger_exception, mock_logger_error ): @@ -692,8 +693,8 @@ def test_get_servers_by_ids( mock_logger_error.assert_called_once_with("Requested VM id3 not found!") mock_logger_exception.assert_called_once_with("Requested VM id4 not found!\n ") - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_attach_volume_to_server(self, mock_logger_info, mock_logger_exception): # Prepare test data expected_attachment = { @@ -744,8 +745,8 @@ def 
test_attach_volume_to_server(self, mock_logger_info, mock_logger_exception): exc_info=True, ) - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_detach_volume(self, mock_logger_info, mock_logger_exception): # Prepare test data expected_server = fakes.generate_fake_resource(server.Server) @@ -787,8 +788,8 @@ def test_detach_volume(self, mock_logger_info, mock_logger_exception): f"Delete volume attachment (server: {server_id} volume: {volume_id}) failed!" ) - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_resize_volume(self, mock_logger_info, mock_logger_exception): # Prepare test data expected_volume = fakes.generate_fake_resource(volume.Volume) @@ -818,8 +819,8 @@ def test_resize_volume(self, mock_logger_info, mock_logger_exception): with self.assertRaises(DefaultException): self.openstack_connector.resize_volume(volume_id, size) - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_create_volume(self, mock_logger_info, mock_logger_exception): # Prepare test data volume_name = "test_volume" @@ -853,7 +854,7 @@ def test_create_volume(self, mock_logger_info, mock_logger_exception): f"Trying to create volume with {volume_storage} GB failed", exc_info=True ) - 
@patch("openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_get_network(self, mock_logger_exception): with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: temp_file.write(CONFIG_DATA) @@ -877,8 +878,8 @@ def test_get_network(self, mock_logger_exception): ) mock_logger_exception.assert_not_called() # Ensure no exception is logged - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_import_existing_keypair(self, mock_logger_info, mock_logger_exception): # Mock the get_keypair method for existing keypair existing_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -900,8 +901,8 @@ def test_import_existing_keypair(self, mock_logger_info, mock_logger_exception): self.mock_openstack_connection.delete_keypair.assert_not_called() mock_logger_exception.assert_not_called() - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_import_non_existing_keypair(self, mock_logger_info, mock_logger_exception): # Mock the get_keypair method for non-existing keypair new_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -927,8 +928,8 @@ def test_import_non_existing_keypair(self, mock_logger_info, mock_logger_excepti self.mock_openstack_connection.delete_keypair.assert_not_called() mock_logger_exception.assert_not_called() - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") 
+ @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_import_changed_keypair(self, mock_logger_info, mock_logger_exception): # Mock the get_keypair method for keypair with changed public_key changed_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -960,8 +961,8 @@ def test_import_changed_keypair(self, mock_logger_info, mock_logger_exception): ) mock_logger_exception.assert_not_called() - @patch("openstack_connector.openstack_connector.logger.exception") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_import_same_keypair(self, mock_logger_info, mock_logger_exception): # Mock the get_keypair method for keypair with same public_key same_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -983,7 +984,7 @@ def test_import_same_keypair(self, mock_logger_info, mock_logger_exception): mock_logger_info.assert_called_with(f"Get Keypair {same_keypair.name}") mock_logger_exception.assert_not_called() - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_create_add_keys_script(self, mock_logger_info): # Prepare test data keys = ["key1", "key2", "key3"] @@ -1003,8 +1004,8 @@ def test_create_add_keys_script(self, mock_logger_info): # Check that the real script content matches the expected content self.assertEqual(result_script, expected_script_content) - @patch("openstack_connector.openstack_connector.socket.socket") - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.socket.socket") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def 
test_netcat(self, mock_logger_info, mock_socket): # Replace with the actual host and port host = "example.com" @@ -1026,7 +1027,7 @@ def test_netcat(self, mock_logger_info, mock_socket): f"Checking SSH Connection {host}:{port} Result = 0" ) - @patch("openstack_connector.openstack_connector.logger.info") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_flavor(self, mock_logger_info): # Replace with the actual flavor name or ID expected_flavor = fakes.generate_fake_resource(flavor.Flavor) @@ -1044,7 +1045,7 @@ def test_get_flavor(self, mock_logger_info): name_or_id=expected_flavor.name, get_extra=True ) - @mock.patch("openstack_connector.openstack_connector.logger.info") + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_flavors(self, mock_logger_info): # Replace with the actual flavors you want to simulate expected_flavors = list(fakes.generate_fake_resources(flavor.Flavor, count=3)) @@ -1064,7 +1065,7 @@ def test_get_flavors(self, mock_logger_info): get_extra=True ) - @mock.patch("openstack_connector.openstack_connector.logger.info") + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_servers_by_bibigrid_id(self, mock_logger_info): # Replace with the actual Bibigrid ID you want to test bibigrid_id = "your_bibigrid_id" @@ -1089,8 +1090,10 @@ def test_get_servers_by_bibigrid_id(self, mock_logger_info): filters={"bibigrid_id": bibigrid_id, "name": bibigrid_id} ) - @mock.patch("openstack_connector.openstack_connector.logger.exception") - @mock.patch("openstack_connector.openstack_connector.logger.info") + @mock.patch( + "simple_vm_client.openstack_connector.openstack_connector.logger.exception" + ) + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_create_snapshot(self, mock_logger_info, mock_logger_exception): # Replace with the actual parameters you want to test openstack_id = 
"your_openstack_id" @@ -1131,8 +1134,10 @@ def test_create_snapshot(self, mock_logger_info, mock_logger_exception): openstack_id, name, username, base_tags, description ) - @mock.patch("openstack_connector.openstack_connector.logger.exception") - @mock.patch("openstack_connector.openstack_connector.logger.info") + @mock.patch( + "simple_vm_client.openstack_connector.openstack_connector.logger.exception" + ) + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_delete_image(self, mock_logger_info, mock_logger_exception): # Replace with the actual image_id you want to test fake_image = fakes.generate_fake_resource(image.Image) @@ -1156,7 +1161,7 @@ def test_delete_image(self, mock_logger_info, mock_logger_exception): f"Delete Image {fake_image.id} failed!" ) - @mock.patch("openstack_connector.openstack_connector.logger.info") + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_public_images(self, mock_logger_info): # Replace with the actual public images you want to test images = list(fakes.generate_fake_resources(image.Image, count=3)) From 5c8e89283214f482066373a3f79bfb584430e3e9 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 08:58:01 +0100 Subject: [PATCH 14/39] updated imports --- .github/workflows/coverage.yml | 45 ++++++++++++------- simple_vm_client/VirtualMachineHandler.py | 13 +++--- simple_vm_client/VirtualMachineServer.py | 5 ++- simple_vm_client/VirtualMachineService.py | 3 +- .../bibigrid_connector/bibigrid_connector.py | 5 ++- simple_vm_client/constants.py | 5 --- .../forc_connector/forc_connector.py | 7 +-- .../forc_connector/playbook/playbook.py | 7 +-- simple_vm_client/util/thrift_converter.py | 8 ++-- 9 files changed, 55 insertions(+), 43 deletions(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 05e646d..aef2862 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -1,22 +1,33 @@ 
-name: 'coverage' +# This workflow will install dependencies, create coverage tests and run Pytest Coverage Comment +# For more information see: https://github.com/MishaKav/pytest-coverage-comment/ +name: pytest-coverage-comment on: - pull_request: - workflow_dispatch: + pull_request: + branches: + - '*' +jobs: + build: + runs-on: ubuntu-latest + steps: + - uses: actions/checkout@v3 + - name: Set up Python 3.11 + uses: actions/setup-python@v2 + with: + python-version: 3.11 -jobs: - coverage: - runs-on: ubuntu-latest - steps: - - uses: actions/checkout@v4.1.1 - - name: Install coverage - run: pip install coverage + - name: Install dependencies + run: | + python -m pip install --upgrade pip + pip install flake8 pytest pytest-cov + if [ -f requirements.txt ]; then pip install -r requirements.txt; fi - - name: Run Coverage - run: cd simple_vm_client && coverage xml coverage.xml + - name: Build coverage file + run: | + pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client | tee pytest-coverage.txt - - name: Get Cover - uses: orgoro/coverage@v3.1 - with: - coverageFile: simple_vm_client/coverage.xml - token: ${{ secrets.GITHUB_TOKEN }} + - name: Pytest coverage comment + uses: MishaKav/pytest-coverage-comment@main + with: + pytest-coverage-path: ./pytest-coverage.txt + junitxml-path: ./pytest.xml diff --git a/simple_vm_client/VirtualMachineHandler.py b/simple_vm_client/VirtualMachineHandler.py index 547fd58..4920a92 100644 --- a/simple_vm_client/VirtualMachineHandler.py +++ b/simple_vm_client/VirtualMachineHandler.py @@ -7,12 +7,13 @@ from typing import TYPE_CHECKING -from bibigrid_connector.bibigrid_connector import BibigridConnector -from forc_connector.forc_connector import ForcConnector -from openstack_connector.openstack_connector import OpenStackConnector -from util import thrift_converter -from util.logger import setup_custom_logger -from VirtualMachineService import Iface +from 
simple_vm_client.bibigrid_connector.bibigrid_connector import BibigridConnector +from simple_vm_client.forc_connector.forc_connector import ForcConnector +from simple_vm_client.openstack_connector.openstack_connector import OpenStackConnector +from simple_vm_client.util import thrift_converter +from simple_vm_client.util.logger import setup_custom_logger + +from .VirtualMachineService import Iface if TYPE_CHECKING: from ttypes import ( diff --git a/simple_vm_client/VirtualMachineServer.py b/simple_vm_client/VirtualMachineServer.py index 1c57ff6..c797e2e 100644 --- a/simple_vm_client/VirtualMachineServer.py +++ b/simple_vm_client/VirtualMachineServer.py @@ -8,8 +8,9 @@ from thrift.protocol import TBinaryProtocol from thrift.server import TServer from thrift.transport import TSocket, TSSLSocket, TTransport -from VirtualMachineHandler import VirtualMachineHandler -from VirtualMachineService import Processor + +from simple_vm_client.VirtualMachineHandler import VirtualMachineHandler +from simple_vm_client.VirtualMachineService import Processor USERNAME = "OS_USERNAME" PASSWORD = "OS_PASSWORD" diff --git a/simple_vm_client/VirtualMachineService.py b/simple_vm_client/VirtualMachineService.py index 461f7e0..2ad7457 100644 --- a/simple_vm_client/VirtualMachineService.py +++ b/simple_vm_client/VirtualMachineService.py @@ -12,7 +12,8 @@ from thrift.Thrift import TApplicationException, TMessageType, TProcessor, TType from thrift.transport import TTransport from thrift.TRecursive import fix_spec -from ttypes import * + +from simple_vm_client.ttypes import * all_structs = [] diff --git a/simple_vm_client/bibigrid_connector/bibigrid_connector.py b/simple_vm_client/bibigrid_connector/bibigrid_connector.py index 24417ed..5c0d8d9 100644 --- a/simple_vm_client/bibigrid_connector/bibigrid_connector.py +++ b/simple_vm_client/bibigrid_connector/bibigrid_connector.py @@ -1,7 +1,8 @@ import requests import yaml -from ttypes import ClusterInfo, ClusterInstance -from util.logger import 
setup_custom_logger + +from simple_vm_client.ttypes import ClusterInfo, ClusterInstance +from simple_vm_client.util.logger import setup_custom_logger logger = setup_custom_logger(__name__) diff --git a/simple_vm_client/constants.py b/simple_vm_client/constants.py index 6a03e2f..50f402c 100644 --- a/simple_vm_client/constants.py +++ b/simple_vm_client/constants.py @@ -6,10 +6,5 @@ # options string: py # -from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException -from thrift.protocol.TProtocol import TProtocolException -from thrift.TRecursive import fix_spec -import sys -from ttypes import * VERSION = "1.0.0" diff --git a/simple_vm_client/forc_connector/forc_connector.py b/simple_vm_client/forc_connector/forc_connector.py index 1873cb8..78e035e 100644 --- a/simple_vm_client/forc_connector/forc_connector.py +++ b/simple_vm_client/forc_connector/forc_connector.py @@ -7,7 +7,8 @@ import requests import yaml from openstack.compute.v2.server import Server -from ttypes import ( + +from simple_vm_client.ttypes import ( Backend, BackendNotFoundException, CondaPackage, @@ -16,8 +17,8 @@ PlaybookResult, TemplateNotFoundException, ) -from util.logger import setup_custom_logger -from util.state_enums import VmTaskStates +from simple_vm_client.util.logger import setup_custom_logger +from simple_vm_client.util.state_enums import VmTaskStates from .playbook.playbook import Playbook from .template.template import ResearchEnvironmentMetadata, Template diff --git a/simple_vm_client/forc_connector/playbook/playbook.py b/simple_vm_client/forc_connector/playbook/playbook.py index 0852e95..e2f3fe4 100644 --- a/simple_vm_client/forc_connector/playbook/playbook.py +++ b/simple_vm_client/forc_connector/playbook/playbook.py @@ -6,9 +6,10 @@ import redis import ruamel.yaml -from ttypes import CondaPackage -from util.logger import setup_custom_logger -from util.state_enums import VmTaskStates + +from simple_vm_client.ttypes import CondaPackage +from 
simple_vm_client.util.logger import setup_custom_logger +from simple_vm_client.util.state_enums import VmTaskStates CONDA = "conda" MOSH = "mosh" diff --git a/simple_vm_client/util/thrift_converter.py b/simple_vm_client/util/thrift_converter.py index 98ca070..5b03259 100644 --- a/simple_vm_client/util/thrift_converter.py +++ b/simple_vm_client/util/thrift_converter.py @@ -7,9 +7,10 @@ from openstack.compute.v2.flavor import Flavor as OpenStack_Flavor from openstack.compute.v2.image import Image as OpenStack_Image from openstack.compute.v2.server import Server as OpenStack_Server -from ttypes import VM, Flavor, Image, Snapshot, Volume -from util.logger import setup_custom_logger -from util.state_enums import VmStates + +from simple_vm_client.ttypes import VM, Flavor, Image, Snapshot, Volume +from simple_vm_client.util.logger import setup_custom_logger +from simple_vm_client.util.state_enums import VmStates logger = setup_custom_logger(__name__) @@ -110,7 +111,6 @@ def os_to_thrift_server(openstack_server: OpenStack_Server) -> VM: image = None for values in openstack_server.addresses.values(): for address in values: - if address["OS-EXT-IPS:type"] == "floating": floating_ip = address["addr"] elif address["OS-EXT-IPS:type"] == "fixed": From fa994bf847fcdf751188049cef342be1bc4e8c6f Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 09:01:33 +0100 Subject: [PATCH 15/39] fixed coverage setup --- setup.cfg | 2 -- 1 file changed, 2 deletions(-) diff --git a/setup.cfg b/setup.cfg index f75c6c1..fd6cc3e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -27,5 +27,3 @@ warn_unused_configs = True [coverage:run] include = simple_vm_client/* omit = *migrations*, *tests* -plugins = - django_coverage_plugin From ae269e10950f300ed2c9e29c49ff94a657d9b65b Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 09:15:45 +0100 Subject: [PATCH 16/39] createlogfile if not exist --- simple_vm_client/util/logger.py | 5 +++++ 1 file changed, 5 insertions(+) diff --git 
a/simple_vm_client/util/logger.py b/simple_vm_client/util/logger.py index 5850cb7..2dfc78b 100644 --- a/simple_vm_client/util/logger.py +++ b/simple_vm_client/util/logger.py @@ -17,6 +17,11 @@ def setup_custom_logger(name): handler = logging.StreamHandler() handler.setFormatter(formatter) + # Create the log directory if it does not exist + log_dir = os.path.dirname(LOG_FILE) + if not os.path.exists(log_dir): + os.makedirs(log_dir) + file_handler = RotatingFileHandler( maxBytes=LOG_MAX_BYTES, backupCount=LOG_BACKUP_COUNT, filename=LOG_FILE ) From 33efaace8912a6cfb78b4e7f7ce078146633cb18 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 09:23:35 +0100 Subject: [PATCH 17/39] only check specific files --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index aef2862..b7d99ba 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -24,7 +24,7 @@ jobs: - name: Build coverage file run: | - pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client | tee pytest-coverage.txt + pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client/openstack_connector --cov=simple_vm_client/bibigrid_connector --cov=simple_vm_client/forc_connector | tee pytest-coverage.txt - name: Pytest coverage comment uses: MishaKav/pytest-coverage-comment@main From 2d80a6926c644bcee290c47eb000ffd5ce064890 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 10:22:06 +0100 Subject: [PATCH 18/39] added tests for thrift_converter --- .github/workflows/coverage.yml | 2 +- .../util/test_thrift_converter.py | 190 ++++++++++++++++++ simple_vm_client/util/thrift_converter.py | 4 +- 3 files changed, 193 insertions(+), 3 deletions(-) create mode 100644 simple_vm_client/util/test_thrift_converter.py diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index 
b7d99ba..bce3cc4 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -24,7 +24,7 @@ jobs: - name: Build coverage file run: | - pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client/openstack_connector --cov=simple_vm_client/bibigrid_connector --cov=simple_vm_client/forc_connector | tee pytest-coverage.txt + pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client/openstack_connector --cov=simple_vm_client/bibigrid_connector -cov=simple_vm_client/util --cov=simple_vm_client/forc_connector | tee pytest-coverage.txt - name: Pytest coverage comment uses: MishaKav/pytest-coverage-comment@main diff --git a/simple_vm_client/util/test_thrift_converter.py b/simple_vm_client/util/test_thrift_converter.py new file mode 100644 index 0000000..d609364 --- /dev/null +++ b/simple_vm_client/util/test_thrift_converter.py @@ -0,0 +1,190 @@ +import unittest + +from openstack.block_storage.v2.snapshot import Snapshot as OpenStackVolumeSnapshot +from openstack.block_storage.v3.volume import Volume as OpenStackVolume +from openstack.compute.v2.flavor import Flavor as OpenStackFlavor +from openstack.compute.v2.server import Server as OpenStackServer +from openstack.image.v2.image import Image as OpenStackImage +from openstack.test import fakes + +from simple_vm_client.ttypes import VM, Flavor, Image, Snapshot, Volume +from simple_vm_client.util import thrift_converter + + +class TestThriftConverter(unittest.TestCase): + def test_os_to_thrift_image(self): + openstack_image = fakes.generate_fake_resource(OpenStackImage) + result_image = thrift_converter.os_to_thrift_image( + openstack_image=openstack_image + ) + properties = openstack_image.get("properties") + if not properties: + properties = {} + self.assertIsInstance(result_image, Image) + self.assertEqual(result_image.name, openstack_image.name) + self.assertEqual(result_image.min_disk, openstack_image.min_disk) + 
self.assertEqual(result_image.min_ram, openstack_image.min_ram) + self.assertEqual(result_image.status, openstack_image.status) + self.assertEqual(result_image.created_at, openstack_image.created_at) + self.assertEqual(result_image.updated_at, openstack_image.updated_at) + self.assertEqual(result_image.os_version, openstack_image.os_version) + self.assertEqual(result_image.openstack_id, openstack_image.id) + self.assertEqual(result_image.description, properties.get("description", "")) + self.assertEqual(result_image.tags, openstack_image.tags) + self.assertFalse(result_image.is_snapshot) + + def test_os_to_thrift_images(self): + openstack_images: list[OpenStackImage] = list( + fakes.generate_fake_resources(OpenStackImage, count=3) + ) + result_images: list[Image] = thrift_converter.os_to_thrift_images( + openstack_images=openstack_images + ) + self.assertEqual(len(result_images), len(openstack_images)) + for result_image, openstack_image in zip(result_images, openstack_images): + properties = openstack_image.get("properties") + if not properties: + properties = {} + self.assertIsInstance(result_image, Image) + self.assertEqual(result_image.name, openstack_image.name) + self.assertEqual(result_image.min_disk, openstack_image.min_disk) + self.assertEqual(result_image.min_ram, openstack_image.min_ram) + self.assertEqual(result_image.status, openstack_image.status) + self.assertEqual(result_image.created_at, openstack_image.created_at) + self.assertEqual(result_image.updated_at, openstack_image.updated_at) + self.assertEqual(result_image.os_version, openstack_image.os_version) + self.assertEqual(result_image.openstack_id, openstack_image.id) + self.assertEqual( + result_image.description, properties.get("description", "") + ) + self.assertEqual(result_image.tags, openstack_image.tags) + self.assertFalse(result_image.is_snapshot) + + def test_os_to_thrift_flavor(self): + openstack_flavor: OpenStackFlavor = fakes.generate_fake_resource( + OpenStackFlavor + ) + 
result_flavor: Flavor = thrift_converter.os_to_thrift_flavor( + openstack_flavor=openstack_flavor + ) + self.assertIsInstance(result_flavor, Flavor) + self.assertEqual(result_flavor.vcpus, openstack_flavor.vcpus) + self.assertEqual(result_flavor.ram, openstack_flavor.ram) + self.assertEqual(result_flavor.disk, openstack_flavor.disk) + self.assertEqual(result_flavor.name, openstack_flavor.name) + self.assertEqual(result_flavor.ephemeral_disk, openstack_flavor.ephemeral) + self.assertEqual(result_flavor.description, openstack_flavor.description or "") + + def test_os_to_thrift_flavors(self): + openstack_flavors: list[OpenStackFlavor] = list( + fakes.generate_fake_resources(OpenStackFlavor, count=3) + ) + result_flavors: list[Flavor] = thrift_converter.os_to_thrift_flavors( + openstack_flavors=openstack_flavors + ) + self.assertEqual(len(result_flavors), len(openstack_flavors)) + for result_flavor, openstack_flavor in zip(result_flavors, openstack_flavors): + self.assertIsInstance(result_flavor, Flavor) + self.assertEqual(result_flavor.vcpus, openstack_flavor.vcpus) + self.assertEqual(result_flavor.ram, openstack_flavor.ram) + self.assertEqual(result_flavor.disk, openstack_flavor.disk) + self.assertEqual(result_flavor.name, openstack_flavor.name) + self.assertEqual(result_flavor.ephemeral_disk, openstack_flavor.ephemeral) + self.assertEqual( + result_flavor.description, openstack_flavor.description or "" + ) + + def test_os_to_thrift_volume(self): + openstack_volume: OpenStackVolume = fakes.generate_fake_resource( + OpenStackVolume + ) + result_volume: Volume = thrift_converter.os_to_thrift_volume( + openstack_volume=openstack_volume + ) + + if isinstance(openstack_volume.get("attachments"), list): + device = openstack_volume.attachments[0]["device"] + server_id = openstack_volume.attachments[0]["server_id"] + else: + device = None + server_id = None + self.assertIsInstance(result_volume, Volume) + self.assertEqual(result_volume.status, openstack_volume.status) + 
self.assertEqual(result_volume.id, openstack_volume.id) + self.assertEqual(result_volume.name, openstack_volume.name) + self.assertEqual(result_volume.description, openstack_volume.description) + self.assertEqual(result_volume.size, openstack_volume.size) + self.assertEqual(result_volume.device, device) + self.assertEqual(result_volume.server_id, server_id) + + def test_os_to_thrift_volume_snapshot(self): + openstack_volume_snapshot: OpenStackVolumeSnapshot = ( + fakes.generate_fake_resource(OpenStackVolumeSnapshot) + ) + result_volume_snapshot: Snapshot = ( + thrift_converter.os_to_thrift_volume_snapshot( + openstack_snapshot=openstack_volume_snapshot + ) + ) + self.assertIsInstance(result_volume_snapshot, Snapshot) + self.assertEqual( + result_volume_snapshot.status, openstack_volume_snapshot.status + ) + self.assertEqual(result_volume_snapshot.id, openstack_volume_snapshot.id) + self.assertEqual(result_volume_snapshot.name, openstack_volume_snapshot.name) + self.assertEqual( + result_volume_snapshot.description, openstack_volume_snapshot.description + ) + self.assertEqual( + result_volume_snapshot.created_at, openstack_volume_snapshot.created_at + ) + self.assertEqual(result_volume_snapshot.size, openstack_volume_snapshot.size) + self.assertEqual( + result_volume_snapshot.volume_id, openstack_volume_snapshot.volume_id + ) + + def test_os_to_thrift_server(self): + openstack_server = fakes.generate_fake_resource(OpenStackServer) + openstack_flavor: OpenStackFlavor = fakes.generate_fake_resource( + OpenStackFlavor + ) + openstack_image = fakes.generate_fake_resource(OpenStackImage) + + openstack_server.flavor = openstack_flavor + openstack_server.image = openstack_image + + result_server: VM = thrift_converter.os_to_thrift_server( + openstack_server=openstack_server + ) + self.assertIsInstance(result_server, VM) + self.assertIsInstance(result_server.flavor, Flavor) + self.assertIsInstance(result_server.image, Image) + self.assertEqual(result_server.metadata, 
openstack_server.metadata) + self.assertEqual(result_server.project_id, openstack_server.project_id) + self.assertEqual(result_server.keyname, openstack_server.key_name) + self.assertEqual(result_server.name, openstack_server.name) + self.assertEqual(result_server.created_at, openstack_server.created_at) + self.assertEqual(result_server.task_state, openstack_server.task_state) + self.assertEqual(result_server.vm_state, openstack_server.vm_state) + + def test_os_to_thrift_servers(self): + openstack_servers: list[OpenStackFlavor] = list( + fakes.generate_fake_resources(OpenStackServer, count=3) + ) + for openstack_server in openstack_servers: + openstack_flavor: OpenStackFlavor = fakes.generate_fake_resource( + OpenStackFlavor + ) + openstack_image = fakes.generate_fake_resource(OpenStackImage) + + openstack_server.flavor = openstack_flavor + openstack_server.image = openstack_image + + result_servers: VM = thrift_converter.os_to_thrift_servers( + openstack_servers=openstack_servers + ) + self.assertEqual(len(result_servers), len(openstack_servers)) + + +if __name__ == "__main__": + unittest.main() diff --git a/simple_vm_client/util/thrift_converter.py b/simple_vm_client/util/thrift_converter.py index 5b03259..cf39c0d 100644 --- a/simple_vm_client/util/thrift_converter.py +++ b/simple_vm_client/util/thrift_converter.py @@ -62,7 +62,8 @@ def os_to_thrift_flavors(openstack_flavors: list[OpenStack_Flavor]) -> list[Flav def os_to_thrift_volume(openstack_volume: OpenStack_Volume) -> Volume: if not openstack_volume: return Volume(status=VmStates.NOT_FOUND) - if openstack_volume.get("attachments"): + + if isinstance(openstack_volume.get("attachments"), list): device = openstack_volume.attachments[0]["device"] server_id = openstack_volume.attachments[0]["server_id"] else: @@ -103,7 +104,6 @@ def os_to_thrift_server(openstack_server: OpenStack_Server) -> VM: return VM(vm_state=VmStates.NOT_FOUND) fixed_ip = "" floating_ip = "" - flavor = 
os_to_thrift_flavor(openstack_flavor=openstack_server.flavor) if openstack_server.image: image = os_to_thrift_image(openstack_image=openstack_server.image) From 0e13af841e41a7cae1fac6c565e75139cd202b8f Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 10:26:25 +0100 Subject: [PATCH 19/39] removed .coveragerc --- simple_vm_client/.coveragerc | 6 ------ 1 file changed, 6 deletions(-) delete mode 100644 simple_vm_client/.coveragerc diff --git a/simple_vm_client/.coveragerc b/simple_vm_client/.coveragerc deleted file mode 100644 index 2bd8a6f..0000000 --- a/simple_vm_client/.coveragerc +++ /dev/null @@ -1,6 +0,0 @@ -[run] -source = openstack_connector - -[report] -exclude_lines = - pragma: no cover From 16140ad7a19977f2d1613b5391bf8fd163ec0eca Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 20 Dec 2023 11:24:11 +0100 Subject: [PATCH 20/39] 100% thrift_converter --- .../util/test_thrift_converter.py | 87 +++++++++++++++++-- simple_vm_client/util/thrift_converter.py | 20 ++--- 2 files changed, 91 insertions(+), 16 deletions(-) diff --git a/simple_vm_client/util/test_thrift_converter.py b/simple_vm_client/util/test_thrift_converter.py index d609364..33196ad 100644 --- a/simple_vm_client/util/test_thrift_converter.py +++ b/simple_vm_client/util/test_thrift_converter.py @@ -1,4 +1,5 @@ import unittest +from unittest.mock import patch from openstack.block_storage.v2.snapshot import Snapshot as OpenStackVolumeSnapshot from openstack.block_storage.v3.volume import Volume as OpenStackVolume @@ -9,6 +10,7 @@ from simple_vm_client.ttypes import VM, Flavor, Image, Snapshot, Volume from simple_vm_client.util import thrift_converter +from simple_vm_client.util.state_enums import VmStates class TestThriftConverter(unittest.TestCase): @@ -94,20 +96,56 @@ def test_os_to_thrift_flavors(self): result_flavor.description, openstack_flavor.description or "" ) + def test_os_to_thrift_volume_none(self): + result_volume: Volume = thrift_converter.os_to_thrift_volume( + 
openstack_volume=None + ) + self.assertIsInstance(result_volume, Volume) + self.assertEqual(result_volume.status, VmStates.NOT_FOUND) + + def test_os_to_thrift_volume_without_device(self): + openstack_volume: OpenStackVolume = fakes.generate_fake_resource( + OpenStackVolume + ) + openstack_volume["attachments"] = None + result_volume: Volume = thrift_converter.os_to_thrift_volume( + openstack_volume=openstack_volume + ) + self.assertIsInstance(result_volume, Volume) + + self.assertEqual(result_volume.device, None) + self.assertEqual(result_volume.server_id, None) + + def test_os_to_thrift_volume_with_device(self): + openstack_volume: OpenStackVolume = fakes.generate_fake_resource( + OpenStackVolume + ) + device = "/dev/vdb" + server_id = "1234" + openstack_volume["attachments"] = [{"device": device, "server_id": server_id}] + result_volume: Volume = thrift_converter.os_to_thrift_volume( + openstack_volume=openstack_volume + ) + self.assertIsInstance(result_volume, Volume) + self.assertEqual(result_volume.status, openstack_volume.status) + self.assertEqual(result_volume.id, openstack_volume.id) + self.assertEqual(result_volume.name, openstack_volume.name) + self.assertEqual(result_volume.description, openstack_volume.description) + self.assertEqual(result_volume.size, openstack_volume.size) + self.assertEqual(result_volume.device, device) + self.assertEqual(result_volume.server_id, server_id) + def test_os_to_thrift_volume(self): openstack_volume: OpenStackVolume = fakes.generate_fake_resource( OpenStackVolume ) + result_volume: Volume = thrift_converter.os_to_thrift_volume( openstack_volume=openstack_volume ) - if isinstance(openstack_volume.get("attachments"), list): - device = openstack_volume.attachments[0]["device"] - server_id = openstack_volume.attachments[0]["server_id"] - else: - device = None - server_id = None + device = None + server_id = None self.assertIsInstance(result_volume, Volume) self.assertEqual(result_volume.status, openstack_volume.status) 
self.assertEqual(result_volume.id, openstack_volume.id) @@ -117,6 +155,13 @@ def test_os_to_thrift_volume(self): self.assertEqual(result_volume.device, device) self.assertEqual(result_volume.server_id, server_id) + def test_os_to_thrift_volume_snapshot_none(self): + result_volume_snapshot: Snapshot = ( + thrift_converter.os_to_thrift_volume_snapshot(openstack_snapshot=None) + ) + self.assertIsInstance(result_volume_snapshot, Snapshot) + self.assertEqual(result_volume_snapshot.status, VmStates.NOT_FOUND) + def test_os_to_thrift_volume_snapshot(self): openstack_volume_snapshot: OpenStackVolumeSnapshot = ( fakes.generate_fake_resource(OpenStackVolumeSnapshot) @@ -143,6 +188,30 @@ def test_os_to_thrift_volume_snapshot(self): result_volume_snapshot.volume_id, openstack_volume_snapshot.volume_id ) + def test_os_to_thrift_server_no_image_and_addresses(self): + openstack_server = fakes.generate_fake_resource(OpenStackServer) + openstack_flavor: OpenStackFlavor = fakes.generate_fake_resource( + OpenStackFlavor + ) + openstack_server.flavor = openstack_flavor + floating_ip = "127.0.0.1" + fixed = "192.168.0.1" + openstack_server.addresses = { + "network": [ + {"OS-EXT-IPS:type": "floating", "addr": floating_ip}, + {"OS-EXT-IPS:type": "fixed", "addr": fixed}, + ] + } + openstack_server.image = None + result_server: VM = thrift_converter.os_to_thrift_server( + openstack_server=openstack_server + ) + self.assertIsInstance(result_server, VM) + self.assertIsInstance(result_server.flavor, Flavor) + self.assertEqual(result_server.image, None) + self.assertEqual(result_server.fixed_ip, fixed) + self.assertEqual(result_server.floating_ip, floating_ip) + def test_os_to_thrift_server(self): openstack_server = fakes.generate_fake_resource(OpenStackServer) openstack_flavor: OpenStackFlavor = fakes.generate_fake_resource( @@ -167,6 +236,12 @@ def test_os_to_thrift_server(self): self.assertEqual(result_server.task_state, openstack_server.task_state) self.assertEqual(result_server.vm_state, 
openstack_server.vm_state) + @patch("simple_vm_client.util.thrift_converter.logger") + def test_os_to_thrift_server_none(self, mock_logger): + result_server: VM = thrift_converter.os_to_thrift_server(openstack_server=None) + self.assertEqual(result_server.vm_state, VmStates.NOT_FOUND) + mock_logger.info.assert_called_once_with("Openstack server not found") + def test_os_to_thrift_servers(self): openstack_servers: list[OpenStackFlavor] = list( fakes.generate_fake_resources(OpenStackServer, count=3) diff --git a/simple_vm_client/util/thrift_converter.py b/simple_vm_client/util/thrift_converter.py index cf39c0d..8af1177 100644 --- a/simple_vm_client/util/thrift_converter.py +++ b/simple_vm_client/util/thrift_converter.py @@ -1,7 +1,5 @@ from __future__ import annotations -import logging - from openstack.block_storage.v2.snapshot import Snapshot as OpenStack_Snapshot from openstack.block_storage.v2.volume import Volume as OpenStack_Volume from openstack.compute.v2.flavor import Flavor as OpenStack_Flavor @@ -62,13 +60,15 @@ def os_to_thrift_flavors(openstack_flavors: list[OpenStack_Flavor]) -> list[Flav def os_to_thrift_volume(openstack_volume: OpenStack_Volume) -> Volume: if not openstack_volume: return Volume(status=VmStates.NOT_FOUND) - - if isinstance(openstack_volume.get("attachments"), list): - device = openstack_volume.attachments[0]["device"] - server_id = openstack_volume.attachments[0]["server_id"] - else: - device = None - server_id = None + attachments = openstack_volume.attachments + device = None + server_id = None + if attachments: + try: + device = openstack_volume.attachments[0]["device"] + server_id = openstack_volume.attachments[0]["server_id"] + except Exception: + pass volume = Volume( status=openstack_volume.status, id=openstack_volume.id, @@ -99,7 +99,7 @@ def os_to_thrift_volume_snapshot(openstack_snapshot: OpenStack_Snapshot) -> Snap def os_to_thrift_server(openstack_server: OpenStack_Server) -> VM: if not openstack_server: - 
logging.info("Openstack server not found") + logger.info("Openstack server not found") return VM(vm_state=VmStates.NOT_FOUND) fixed_ip = "" From 3271211875b792d3f9a9c02f7ffb2a382f261b8f Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 21 Dec 2023 15:27:18 +0100 Subject: [PATCH 21/39] feat(Tests):Template tests --- .../forc_connector/template/template.py | 309 ++++--- .../forc_connector/template/test_templates.py | 756 ++++++++++++++++++ .../openstack_connector.py | 10 +- 3 files changed, 943 insertions(+), 132 deletions(-) create mode 100644 simple_vm_client/forc_connector/template/test_templates.py diff --git a/simple_vm_client/forc_connector/template/template.py b/simple_vm_client/forc_connector/template/template.py index 75d24de..0d1b2b4 100644 --- a/simple_vm_client/forc_connector/template/template.py +++ b/simple_vm_client/forc_connector/template/template.py @@ -26,38 +26,57 @@ DIRECTION = "direction" PROTOCOL = "protocol" INFORMATION_FOR_DISPLAY = "information_for_display" -NO_TEMPLATE_NAMES = ["packer"] +NO_TEMPLATE_NAMES = ["packer", "optional", ".github", "cluster"] NEEDS_FORC_SUPPORT = "needs_forc_support" MIN_RAM = "min_ram" MIN_CORES = "min_cores" +FILENAME = "resenv_repo" class ResearchEnvironmentMetadata: def __init__( self, - name: str, + template_name: str, port: str, - security_group_name: str, - security_group_description: str, - security_group_ssh: bool, + securitygroup_name: str, + securitygroup_description: str, + securitygroup_ssh: bool, direction: str, protocol: str, + description: str, + logo_url: str, + info_url: str, information_for_display: str, + title: str, + community_driven: bool = False, + wiki_link: str = "", needs_forc_support: bool = True, min_ram: int = 0, min_cores: int = 0, + is_maintained: bool = True, + forc_versions: list[str] = [], + incompatible_versions: list[str] = [], ): - self.name = name + self.template_name = template_name self.port = port - self.security_group_name = security_group_name - 
self.security_group_description = security_group_description - self.security_group_ssh = security_group_ssh + self.wiki_link = wiki_link + self.description = description + self.title = title + self.community_driven = community_driven + self.logo_url = logo_url + self.info_url = info_url + self.securitygroup_name = securitygroup_name + self.securitygroup_description = securitygroup_description + self.securitygroup_ssh = securitygroup_ssh self.direction = direction self.protocol = protocol self.information_for_display = information_for_display self.needs_forc_support = needs_forc_support self.min_ram = min_ram self.min_cores = min_cores + self.is_maintained = is_maintained + self.forc_versions = forc_versions + self.incompatible_versions = incompatible_versions class Template(object): @@ -79,21 +98,17 @@ def __init__(self, github_playbook_repo: str, forc_url: str, forc_api_key: str): def loaded_research_env_metadata(self) -> dict[str, ResearchEnvironmentMetadata]: return self._loaded_resenv_metadata - def update_playbooks(self) -> None: - if self.GITHUB_PLAYBOOKS_REPO is None: - logger.info( - "Github playbooks repo url is None. Aborting download of playbooks." 
- ) - return + def _download_and_extract_playbooks(self) -> None: logger.info(f"STARTED update of playbooks from - {self.GITHUB_PLAYBOOKS_REPO}") r = requests.get(self.GITHUB_PLAYBOOKS_REPO) - filename = "resenv_repo" - with open(filename, "wb") as output_file: + with open(FILENAME, "wb") as output_file: output_file.write(r.content) logger.info("Downloading Completed") - with zipfile.ZipFile(filename, "r") as zip_ref: + + with zipfile.ZipFile(FILENAME, "r") as zip_ref: zip_ref.extractall(Template.get_playbook_dir()) + def _copy_resenvs_templates(self) -> None: resenvs_unziped_dir = next( filter( lambda f: os.path.isdir(f) and "resenvs" in f, @@ -104,79 +119,96 @@ def update_playbooks(self) -> None: resenvs_unziped_dir, Template.get_playbook_dir(), dirs_exist_ok=True ) shutil.rmtree(resenvs_unziped_dir, ignore_errors=True) + + def _update_loaded_templates(self) -> None: self._all_templates = [ name for name in os.listdir(Template.get_playbook_dir()) if name not in NO_TEMPLATE_NAMES and os.path.isdir(os.path.join(Template.get_playbook_dir(), name)) ] - logger.info(f"Loaded Template Names: {self._all_templates}") - self.install_ansible_galaxy_requirements() - templates_metadata: list[dict[str, str]] = self.load_resenv_metadata() + def _load_and_update_resenv_metadata(self) -> None: + templates_metadata = self._load_resenv_metadata() + for template_metadata in templates_metadata: try: - if template_metadata.get(NEEDS_FORC_SUPPORT, False): - metadata = ResearchEnvironmentMetadata( - name=template_metadata[TEMPLATE_NAME], - port=template_metadata[PORT], - security_group_name=template_metadata[SECURITYGROUP_NAME], - security_group_description=template_metadata[ - SECURITYGROUP_DESCRIPTION - ], - security_group_ssh=bool(template_metadata[SECURITYGROUP_SSH]), - direction=template_metadata[DIRECTION], - protocol=template_metadata[PROTOCOL], - information_for_display=template_metadata[ - INFORMATION_FOR_DISPLAY - ], - needs_forc_support=True, - 
min_cores=template_metadata.get(MIN_CORES, 0), - min_ram=template_metadata.get(MIN_RAM, 0), - ) - self.update_forc_allowed(template_metadata) - if metadata.name not in list(self._loaded_resenv_metadata.keys()): - self._loaded_resenv_metadata[metadata.name] = metadata - else: - if self._loaded_resenv_metadata[metadata.name] != metadata: - self._loaded_resenv_metadata[metadata.name] = metadata - + self._process_template_metadata(template_metadata) except Exception as e: logger.exception( - "Failed to parse Metadata yml: " - + str(template_metadata) - + "\n" - + str(e) + f"Failed to parse Metadata yml: {template_metadata}\n{e}" ) + + def _process_template_metadata( + self, template_metadata: ResearchEnvironmentMetadata + ) -> None: + if template_metadata.needs_forc_support: + self._update_forc_allowed(template_metadata) + + if template_metadata.template_name not in self._loaded_resenv_metadata: + self._loaded_resenv_metadata[ + template_metadata.template_name + ] = template_metadata + elif ( + self._loaded_resenv_metadata[template_metadata.template_name] + != template_metadata + ): + self._loaded_resenv_metadata[ + template_metadata.template_name + ] = template_metadata + + def update_playbooks(self) -> None: + if self.GITHUB_PLAYBOOKS_REPO is None: + logger.error( + "Github playbooks repo URL is None. Aborting download of playbooks." 
+ ) + return + + self._download_and_extract_playbooks() + + self._copy_resenvs_templates() + + self._update_loaded_templates() + + logger.info(f"Loaded Template Names: {self._all_templates}") + + self._install_ansible_galaxy_requirements() + + self._load_and_update_resenv_metadata() + logger.info(f"Allowed Forc {self._forc_allowed}") - def cross_check_forc_image(self, tags: list[str]) -> bool: - get_url = self.TEMPLATES_URL + def _get_forc_templates(self) -> list[dict]: try: response = requests.get( - get_url, + self.TEMPLATES_URL, timeout=(30, 30), - headers={"X-API-KEY": FORC_API_KEY}, + headers={"X-API-KEY": self.FORC_API_KEY}, verify=True, ) - if response.status_code != 200: - return True - else: - templates = response.json() + response.raise_for_status() # Raise HTTPError for bad responses + return response.json() + except requests.RequestException as e: + logger.exception(f"Error while fetching FORC templates: {e}") + return [] + + def cross_check_forc_image(self, tags: list[str]) -> bool: + try: + templates = self._get_forc_templates() except Exception: logger.exception("Could not get templates from FORC.") templates = [] - cross_tags = list(set(self._all_templates).intersection(tags)) + + cross_tags = set(self._all_templates).intersection(tags) + for template_dict in templates: - if ( - template_dict["name"] in self._forc_allowed - and template_dict["name"] in cross_tags - ): - if ( - template_dict["version"] - in self._forc_allowed[template_dict["name"]] - ): + template_name = template_dict["name"] + + if template_name in self._forc_allowed and template_name in cross_tags: + template_version = template_dict["version"] + if template_version in self._forc_allowed[template_name]: return True + return False @staticmethod @@ -187,51 +219,58 @@ def get_playbook_dir() -> str: dir_path = f"{os.path.dirname(os.path.realpath(__file__))}/plays/" return dir_path - def add_forc_allowed_template(self, metadata: dict) -> None: - if metadata.get("needs_forc_support", 
False): - logger.info(f"Add {metadata} - to allowed templates") + def _add_forc_allowed_template(self, metadata: ResearchEnvironmentMetadata) -> None: + if metadata.needs_forc_support: + logger.info(f"Add {metadata.template_name} - to allowed templates") template = ResearchEnvironmentTemplate( - template_name=metadata["template_name"], - title=metadata["title"], - description=metadata["description"], - logo_url=metadata["logo_url"], - info_url=metadata["info_url"], - port=int(metadata["port"]), - incompatible_versions=metadata["incompatible_versions"], - is_maintained=metadata["is_maintained"], - information_for_display=metadata["information_for_display"], - min_ram=metadata.get("min_ram", 0), - min_cores=metadata.get("min_cores", 0), + template_name=metadata.template_name, + title=metadata.title, + description=metadata.description, + logo_url=metadata.logo_url, + info_url=metadata.info_url, + port=int(metadata.port), + incompatible_versions=metadata.incompatible_versions, + is_maintained=metadata.is_maintained, + information_for_display=metadata.information_for_display, + min_ram=metadata.min_ram, + min_cores=metadata.min_cores, ) self._allowed_forc_templates.append(template) - def load_resenv_metadata(self) -> list[dict[str, str]]: - templates_metada = [] + def _load_resenv_metadata(self) -> list[ResearchEnvironmentMetadata]: + templates_metadata = [] + for template in self._all_templates: - if template not in ["optional", "packer", ".github", "cluster"]: + if template not in NO_TEMPLATE_NAMES: template_metadata_name = f"{template}_metadata.yml" try: - with open( - f"{Template.get_playbook_dir()}{template}/{template}_metadata.yml" - ) as template_metadata: - try: - loaded_metadata = yaml.load( - template_metadata, Loader=yaml.FullLoader - ) - - templates_metada.append(loaded_metadata) - self.add_forc_allowed_template(metadata=loaded_metadata) - - except Exception as e: - logger.exception( - "Failed to parse Metadata yml: " - + template_metadata_name - + "\n" - + 
str(e) - ) + metadata_path = os.path.join( + Template.get_playbook_dir(), template, template_metadata_name + ) + + loaded_metadata = self._load_yaml(metadata_path) + + research_environment_metadata: ResearchEnvironmentMetadata = ( + ResearchEnvironmentMetadata(**loaded_metadata) + ) + + self._add_forc_allowed_template(research_environment_metadata) + templates_metadata.append(research_environment_metadata) except Exception as e: - logger.exception(f"No Metadata File found for {template} - {e}") - return templates_metada + self._handle_metadata_exception(template_metadata_name, template, e) + + return templates_metadata + + def _load_yaml(self, file_path: str) -> dict: + with open(file_path) as template_metadata: + return yaml.load(template_metadata, Loader=yaml.FullLoader) or {} + + def _handle_metadata_exception( + self, template_metadata_name: str, template: str, exception: Exception + ) -> None: + logger.exception( + f"Failed to load Metadata yml: {template_metadata_name}\n{str(exception)}" + ) def get_template_version_for(self, template: str) -> str: template_versions: list[str] = self._forc_allowed.get(template) # type: ignore @@ -239,7 +278,7 @@ def get_template_version_for(self, template: str) -> str: return template_versions[0] return "" - def install_ansible_galaxy_requirements(self): + def _install_ansible_galaxy_requirements(self): logger.info("Installing Ansible galaxy requirements..") stream = os.popen( f"ansible-galaxy install -r {Template.get_playbook_dir()}/packer/requirements.yml" @@ -254,25 +293,41 @@ def get_allowed_templates(self) -> list[ResearchEnvironmentTemplate]: return self._allowed_forc_templates - def update_forc_allowed(self, template_metadata: dict[str, str]) -> None: - if template_metadata["needs_forc_support"]: - name = template_metadata[TEMPLATE_NAME] - allowed_versions = [] - for forc_version in template_metadata[FORC_VERSIONS]: - get_url = f"{self.TEMPLATES_URL}/{name}/{forc_version}" - logger.info(f"Check Forc Allowed for - 
{get_url}") - try: - response = requests.get( - get_url, - timeout=(30, 30), - headers={"X-API-KEY": self.FORC_API_KEY}, - verify=True, - ) - logger.info(response.content) - if response.status_code == 200: - allowed_versions.append(forc_version) - except requests.Timeout as e: - logger.info(f"checking template/version timed out. {e}") - allowed_versions.sort(key=LooseVersion) - allowed_versions.reverse() - self._forc_allowed[name] = allowed_versions + def _get_forc_template_version( + self, template_name: str, forc_version: str + ) -> requests.Response: + get_url = f"{self.TEMPLATES_URL}/{template_name}/{forc_version}" + logger.info(f"Get Forc Template Version - {get_url}") + return requests.get( + get_url, + timeout=(30, 30), + headers={"X-API-KEY": self.FORC_API_KEY}, + verify=True, + ) + + def _update_forc_allowed_versions( + self, name: str, allowed_versions: list[str] + ) -> None: + allowed_versions.sort(key=LooseVersion, reverse=True) + self._forc_allowed[name] = allowed_versions + + def _update_forc_allowed( + self, template_metadata: ResearchEnvironmentMetadata + ) -> None: + if not template_metadata.needs_forc_support: + return + + name = template_metadata.template_name + allowed_versions = [] + + for forc_version in template_metadata.forc_versions: + try: + response = self._get_forc_template_version( + template_name=name, forc_version=forc_version + ) + if response.status_code == 200: + allowed_versions.append(forc_version) + except requests.Timeout as e: + logger.error(f"Checking template/version timed out. 
{e}") + + self._update_forc_allowed_versions(name, allowed_versions) diff --git a/simple_vm_client/forc_connector/template/test_templates.py b/simple_vm_client/forc_connector/template/test_templates.py new file mode 100644 index 0000000..d8dabbf --- /dev/null +++ b/simple_vm_client/forc_connector/template/test_templates.py @@ -0,0 +1,756 @@ +import copy +import os +import unittest +from distutils.version import LooseVersion +from unittest.mock import MagicMock, Mock, call, mock_open, patch + +import pytest +import requests +import yaml + +from simple_vm_client.forc_connector.template.template import ( + CONDA, + FILENAME, + ResearchEnvironmentMetadata, + Template, +) +from simple_vm_client.ttypes import ResearchEnvironmentTemplate + +METADATA_EXAMPLE = ResearchEnvironmentMetadata( + template_name="example_template", + port="8080", + wiki_link="https://example.com/wiki", + description="Example template for testing", + title="Example Template", + community_driven=True, + logo_url="https://example.com/logo.png", + info_url="https://example.com/info", + securitygroup_name="example_group", + securitygroup_description="Example security group", + securitygroup_ssh=True, + direction="inbound", + protocol="tcp", + information_for_display="Some information", + needs_forc_support=True, + min_ram=2, + min_cores=1, + is_maintained=True, + forc_versions=["1.0.0", "2.0.0"], + incompatible_versions=["3.0.0"], +) +MOCK_TEMPLATES = [ + ResearchEnvironmentTemplate( + template_name="template_1", + title="Template 1", + description="TemplateDesc1", + logo_url="https://logo1.de", + info_url="https://info1.de", + port=80, + incompatible_versions=["1.0.0"], + is_maintained=True, + information_for_display="Info1", + min_cores=10, + min_ram=2, + ), + ResearchEnvironmentTemplate( + template_name="template_2", + title="Template 2", + description="TemplateDesc2", + logo_url="https://logo2.de", + info_url="https://info2.de", + port=8080, + incompatible_versions=["2.0.0"], + is_maintained=False, 
+ information_for_display="Info2", + min_cores=5, + min_ram=4, + ), + ResearchEnvironmentTemplate( + template_name="template_3", + title="Template 3", + description="TemplateDesc3", + logo_url="https://logo3.de", + info_url="https://info3.de", + port=8000, + incompatible_versions=["3.0.0"], + is_maintained=True, + information_for_display="Info3", + min_cores=8, + min_ram=6, + ), + ResearchEnvironmentTemplate( + template_name="template_4", + title="Template 4", + description="TemplateDesc4", + logo_url="https://logo4.de", + info_url="https://info4.de", + port=8088, + incompatible_versions=["4.0.0"], + is_maintained=False, + information_for_display="Info4", + min_cores=12, + min_ram=8, + ), +] + + +class TestTemplate(unittest.TestCase): + GITHUB_REPO_STAGING = ( + "https://github.com/deNBI/resenvs/archive/refs/heads/staging.zip" + ) + FORC_URL = "https://FAKE_URL.de" + + def get_metadata_example(self): + return copy.deepcopy(METADATA_EXAMPLE) + + def init_template( + self, github_playbook_repo=None, forc_url="", forc_api_key="1234" + ): + with patch.object(Template, "__init__", lambda x, y, z: None): + template = Template(None, None) + template.FORC_URL = forc_url + template.GITHUB_PLAYBOOKS_REPO = github_playbook_repo + template.FORC_API_KEY = forc_api_key + template.TEMPLATES_URL = f"{template.FORC_URL}templates" + template.BACKENDS_URL = f"{template.FORC_URL}backends" + template.BACKENDS_BY_OWNER_URL = f"{template.BACKENDS_URL}/byOwner" + template.BACKENDS_BY_TEMPLATE_URL = f"{template.BACKENDS_URL}/byTemplate" + template._forc_allowed: dict[str, list[str]] = {} + template._all_templates = [CONDA] + template._loaded_resenv_metadata: dict[ + str, ResearchEnvironmentMetadata + ] = {} + template._allowed_forc_templates: list[ResearchEnvironmentTemplate] = [] + + return template + + @patch("requests.get") + @patch("zipfile.ZipFile") + @patch("builtins.open", create=True) + @patch("simple_vm_client.forc_connector.template.template.logger.info") + def 
test_download_and_extract_playbooks( + self, mock_logger_info, mock_open, mock_zipfile, mock_requests + ): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Set up mock responses + mock_response = Mock() + mock_response.content = b"Mock content" + mock_requests.return_value = mock_response + + # Call the method to test + template._download_and_extract_playbooks() + + # Assert that the requests.get method was called with the correct URL + mock_requests.assert_called_once_with(template.GITHUB_PLAYBOOKS_REPO) + + # Assert logging messages + mock_logger_info.assert_any_call( + f"STARTED update of playbooks from - {template.GITHUB_PLAYBOOKS_REPO}" + ) + mock_logger_info.assert_any_call("Downloading Completed") + + # Assert that the open method was called with the correct file name and mode + mock_open.assert_called_once_with(FILENAME, "wb") + + # Assert that the write method was called on the file object + mock_open.return_value.__enter__.return_value.write.assert_called_once_with( + mock_response.content + ) + + # Assert that the zipfile.ZipFile constructor was called with the correct file name and mode + mock_zipfile.assert_called_once_with(FILENAME, "r") + + @patch("glob.glob") + @patch("shutil.copytree") + @patch("shutil.rmtree") + @patch( + "os.path.isdir", return_value=True + ) # Mock os.path.isdir to always return True + def test_copy_resenvs_templates( + self, mock_isdir, mock_rmtree, mock_copytree, mock_glob + ): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Set up mock responses + mock_glob.return_value = ["/path/to/directory/resenvs"] + + def mock_glob_side_effect(pattern): + if pattern == Template.get_playbook_dir() + "*": + return ["/path/to/directory/resenvs"] + else: + return [] + + mock_glob.side_effect = mock_glob_side_effect + + # Call the method to test + 
template._copy_resenvs_templates() + + # Assert that glob.glob was called with the correct parameters + mock_glob.assert_called_once_with(Template.get_playbook_dir() + "*") + + # Assert that shutil.copytree was called with the correct parameters + mock_copytree.assert_called_once_with( + "/path/to/directory/resenvs", + Template.get_playbook_dir(), + dirs_exist_ok=True, + ) + + # Assert that shutil.rmtree was called with the correct parameters + mock_rmtree.assert_called_once_with( + "/path/to/directory/resenvs", ignore_errors=True + ) + + @patch("os.listdir") + @patch("os.path.isdir") + def test_update_loaded_templates(self, mock_isdir, mock_listdir): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Set up mock responses + mock_listdir.return_value = [ + "template1", + "template2", + "non_template", + "optional", + "packer", + ".github", + "cluster", + ] + mock_isdir.side_effect = ( + lambda path: "non_template" not in path + ) # Mock isdir to return True for templates + + # Call the method to test + template._update_loaded_templates() + + # Assert that os.listdir was called with the correct parameters + mock_listdir.assert_called_once_with(Template.get_playbook_dir()) + + # Assert that os.path.isdir was called for each template + mock_isdir.assert_any_call( + os.path.join(Template.get_playbook_dir(), "template1") + ) + mock_isdir.assert_any_call( + os.path.join(Template.get_playbook_dir(), "template2") + ) + mock_isdir.assert_any_call( + os.path.join(Template.get_playbook_dir(), "non_template") + ) + with pytest.raises(AssertionError): + mock_isdir.assert_called_with( + os.path.join(Template.get_playbook_dir(), "packer") + ) + with pytest.raises(AssertionError): + mock_isdir.assert_called_with( + os.path.join(Template.get_playbook_dir(), ".github") + ) + with pytest.raises(AssertionError): + mock_isdir.assert_called_with( + os.path.join(Template.get_playbook_dir(), "cluster") + ) 
+ with pytest.raises(AssertionError): + mock_isdir.assert_called_with( + os.path.join(Template.get_playbook_dir(), "optional") + ) + + # Assert that the _all_templates attribute is updated correctly + expected_templates = ["template1", "template2"] + self.assertEqual(template._all_templates, expected_templates) + + @patch("simple_vm_client.forc_connector.template.template.logger.error") + def test_update_playbooks_no_github_repo(self, mock_logger_error): + template = self.init_template() + template.update_playbooks() + mock_logger_error.assert_called_once_with( + "Github playbooks repo URL is None. Aborting download of playbooks." + ) + + @patch("simple_vm_client.forc_connector.template.template.os.popen") + @patch("simple_vm_client.forc_connector.template.template.logger.info") + def test_install_ansible_galaxy_requirements(self, mock_logger_info, mock_os_popen): + # Set up mocks + mock_os_popen_instance = MagicMock() + mock_os_popen.return_value = mock_os_popen_instance + mock_os_popen_instance.read.return_value = "Mocked output" + + # Create an instance of the Template class + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Call the method to be tested + template._install_ansible_galaxy_requirements() + + # Assertions + mock_logger_info.assert_any_call("Installing Ansible galaxy requirements..") + mock_os_popen.assert_called_with( + f"ansible-galaxy install -r {Template.get_playbook_dir()}/packer/requirements.yml" + ) + mock_os_popen_instance.read.assert_called_once() + mock_logger_info.assert_any_call("Mocked output") + + def test_get_template_version_for_existing_template(self): + # Mock _forc_allowed with some versions for the template + template_name = "example_template" + expected_version = "1.2.3" + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + template._forc_allowed = {template_name: 
[expected_version]} + + # Call the method to be tested + result_version = template.get_template_version_for(template_name) + + # Assertion + self.assertEqual(result_version, expected_version) + + def test_get_template_version_for_nonexistent_template(self): + # Mock _forc_allowed without the template + template_name = "nonexistent_template" + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + template._forc_allowed = {} + + # Call the method to be tested + result_version = template.get_template_version_for(template_name) + + # Assertion + self.assertEqual(result_version, "") + + @patch("simple_vm_client.forc_connector.template.template.logger.info") + def test_get_allowed_templates(self, mock_logger_info): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + template._allowed_forc_templates = MOCK_TEMPLATES + + result_templates = template.get_allowed_templates() + + # Assertions + self.assertEqual(result_templates, MOCK_TEMPLATES) + + # Check log output if needed + mock_logger_info.assert_any_call("Allowed templates:") + for template in MOCK_TEMPLATES: + mock_logger_info.assert_any_call(template) + + @patch("simple_vm_client.forc_connector.template.template.requests.get") + @patch("simple_vm_client.forc_connector.template.template.logger.info") + def test_get_forc_template_version(self, mock_logger_info, mock_requests_get): + # Set up the mock response + expected_response = Mock() + mock_requests_get.return_value = expected_response + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Call the method to be tested + result_response = template._get_forc_template_version( + template_name="mock_name", forc_version="mock_version" + ) + + # Assertions + self.assertEqual(result_response, expected_response) + + # Additional assertions 
based on your specific requirements + get_url = f"{template.TEMPLATES_URL}/mock_name/mock_version" + mock_logger_info.assert_called_once_with( + f"Get Forc Template Version - {get_url}" + ) + + mock_requests_get.assert_called_once_with( + get_url, + timeout=(30, 30), + headers={"X-API-KEY": template.FORC_API_KEY}, + verify=True, + ) + + def test_update_forc_allowed_versions(self): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Call the method to be tested + template._update_forc_allowed_versions( + name="mock_name", allowed_versions=["1.0.0", "2.0.0", "1.5.0"] + ) + + # Assertions + expected_forc_allowed = {"mock_name": ["2.0.0", "1.5.0", "1.0.0"]} + self.assertEqual(template._forc_allowed, expected_forc_allowed) + + @patch( + "simple_vm_client.forc_connector.template.template.Template._get_forc_template_version" + ) + def test_update_forc_allowed(self, mock_get_forc_template_version): + # Set up the mock for _get_forc_template_version + mock_get_forc_template_version.return_value.status_code = 200 + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Call the method to be tested + metadata_example = self.get_metadata_example() + template._update_forc_allowed(metadata_example) + versions = template._forc_allowed[metadata_example.template_name] + versions.sort(key=LooseVersion, reverse=True) + + # Assertions + self.assertCountEqual(versions, metadata_example.forc_versions) + for forc_version in metadata_example.forc_versions: + f"{template.TEMPLATES_URL}/{metadata_example.template_name}/{forc_version}" + mock_get_forc_template_version.assert_any_call( + forc_version=forc_version, template_name=metadata_example.template_name + ) + + @patch( + "simple_vm_client.forc_connector.template.template.Template._get_forc_template_version" + ) + def test_update_forc_allowed_no_support_needed( + self, 
mock_get_forc_template_version + ): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + metadata_example = self.get_metadata_example() + metadata_example.needs_forc_support = False + template._update_forc_allowed(metadata_example) + mock_get_forc_template_version.assert_not_called() + + @patch("simple_vm_client.forc_connector.template.template.requests.get") + @patch("simple_vm_client.forc_connector.template.template.logger.error") + @patch("simple_vm_client.forc_connector.template.template.logger.info") + def test_update_forc_allowed_with_exception( + self, mock_logger_info, mock_logger_error, mock_requests_get + ): + mock_requests_get.side_effect = requests.exceptions.Timeout("Timeout occurred") + + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + metadata_example = self.get_metadata_example() + + template._update_forc_allowed(metadata_example) + for forc_version in metadata_example.forc_versions: + get_url = f"{template.TEMPLATES_URL}/{metadata_example.template_name}/{forc_version}" + mock_logger_info.assert_any_call(f"Get Forc Template Version - {get_url}") + mock_requests_get.assert_any_call( + get_url, + timeout=(30, 30), + headers={"X-API-KEY": template.FORC_API_KEY}, + verify=True, + ) + + # Check that logger.error is called for each forc_version + expected_calls = [ + call("Checking template/version timed out. 
Timeout occurred") + ] * len(METADATA_EXAMPLE.forc_versions) + mock_logger_error.assert_has_calls(expected_calls, any_order=True) + + @patch("builtins.open", new_callable=mock_open, read_data="key: value\n") + @patch("simple_vm_client.forc_connector.template.template.yaml.load") + def test_load_yaml(self, mock_yaml_load, mock_open): + # Arrange + file_path = "fake/path/to/template_metadata.yml" + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Set the return value for yaml.load + mock_yaml_load.return_value = {"key": "value"} + + # Act + result = template._load_yaml(file_path) + + # Assert + mock_open.assert_called_once_with(file_path) + mock_yaml_load.assert_called_once_with( + mock_open.return_value, Loader=yaml.FullLoader + ) + self.assertEqual(result, {"key": "value"}) + + @patch("simple_vm_client.forc_connector.template.template.logger.exception") + def test_handle_metadata_exception(self, mock_logger_exception): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + template_metadata_name = "fake_metadata_name" + template_name = "fake_template" + fake_exception = ValueError("Fake error message") + + # Act + template._handle_metadata_exception( + template_metadata_name, template_name, fake_exception + ) + + # Assert + mock_logger_exception.assert_called_once_with( + f"Failed to load Metadata yml: {template_metadata_name}\n{str(fake_exception)}" + ) + + @patch("simple_vm_client.forc_connector.template.template.Template._load_yaml") + @patch( + "simple_vm_client.forc_connector.template.template.Template._add_forc_allowed_template" + ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._handle_metadata_exception" + ) + def test_load_resenv_metadata( + self, mock_handle_exception, mock_add_forc_allowed, mock_load_yaml + ): + # Arrange + template = self.init_template( + 
github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + mock_template_metadata: ResearchEnvironmentMetadata = ( + self.get_metadata_example() + ) # Replace with your example metadata + mock_load_yaml.return_value = mock_template_metadata.__dict__ + template._all_templates = [mock_template_metadata.template_name] + + # Act + result = template._load_resenv_metadata() + + # Assert + mock_load_yaml.assert_called_once() # Check if _load_yaml was called + self.assertIsInstance( + mock_add_forc_allowed.call_args[0][0], ResearchEnvironmentMetadata + ) + + self.assertEqual( + len(result), 1 + ) # Check if one item is returned in the result list + self.assertIsInstance( + result[0], ResearchEnvironmentMetadata + ) # Check if the item is an instance of ResearchEnvironmentMetadata + mock_handle_exception.assert_not_called() # Ensure _handle_metadata_exception is not called + + @patch("simple_vm_client.forc_connector.template.template.Template._load_yaml") + @patch("simple_vm_client.forc_connector.template.template.logger.exception") + def test_load_resenv_metadata_exception( + self, mock_logger_exception, mock_load_yaml + ): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + mock_template_metadata: ResearchEnvironmentMetadata = ( + self.get_metadata_example() + ) + exception_message = "Some error" + mock_load_yaml.side_effect = Exception("Some error") + mock_load_yaml.return_value = mock_template_metadata.__dict__ + template._all_templates = [mock_template_metadata.template_name] + + # Act + template._load_resenv_metadata() + + mock_logger_exception.assert_called_once_with( + f"Failed to load Metadata yml: {mock_template_metadata.template_name}_metadata.yml\n{exception_message}" + ) + + def test_add_forc_allowed_template(self): + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + 
mock_template_metadata: ResearchEnvironmentMetadata = ( + self.get_metadata_example() + ) + + # Act + template._add_forc_allowed_template(mock_template_metadata) + + # Assert + self.assertEqual( + len(template._allowed_forc_templates), 1 + ) # Check if the template was added + added_template = template._allowed_forc_templates[0] + self.assertIsInstance( + added_template, ResearchEnvironmentTemplate + ) # Check if the added item is an instance of ResearchEnvironmentTemplate + self.assertEqual( + added_template.template_name, mock_template_metadata.template_name + ) + + @patch("simple_vm_client.forc_connector.template.template.requests.get") + def test_get_forc_templates(self, mock_requests_get): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + expected_response = [ + {"template_name": "template1"}, + {"template_name": "template2"}, + ] + mock_requests_get.return_value.json.return_value = expected_response + + # Act + result = template._get_forc_templates() + + # Assert + mock_requests_get.assert_called_once_with( + template.TEMPLATES_URL, + timeout=(30, 30), + headers={"X-API-KEY": template.FORC_API_KEY}, + verify=True, + ) + self.assertEqual( + result, expected_response + ) # Check if the result matches the expected response + + @patch("simple_vm_client.forc_connector.template.template.requests.get") + @patch("simple_vm_client.forc_connector.template.template.logger.exception") + def test_get_forc_templates_exception( + self, mock_logger_exception, mock_requests_get + ): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + error_msg = "Error fetching FORC templates" + mock_requests_get.side_effect = requests.RequestException(error_msg) + + # Act + result = template._get_forc_templates() + + # Assert + mock_requests_get.assert_called_once_with( + template.TEMPLATES_URL, + timeout=(30, 
30), + headers={"X-API-KEY": template.FORC_API_KEY}, + verify=True, + ) + self.assertEqual(result, []) + mock_logger_exception.assert_called_once_with( + f"Error while fetching FORC templates: {error_msg}" + ) + + @patch( + "simple_vm_client.forc_connector.template.template.Template._get_forc_templates" + ) + def test_cross_check_forc_image(self, mock_get_forc_templates): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + tags = ["template1", "template2"] + allowed_templates = {"template1": ["version1"], "template2": ["version2"]} + mock_get_forc_templates.return_value = [ + {"name": "template1", "version": "version1"}, + {"name": "template2", "version": "version2"}, + ] + template._forc_allowed = allowed_templates + template._all_templates = tags + + # Act + result = template.cross_check_forc_image(tags) + + # Assert + mock_get_forc_templates.assert_called_once() + self.assertTrue(result) # Check if the result is True for a valid case + + @patch( + "simple_vm_client.forc_connector.template.template.Template._get_forc_templates" + ) + @patch("simple_vm_client.forc_connector.template.template.logger.exception") + def test_cross_check_forc_image_exception( + self, mock_logger_exception, mock_get_forc_templates + ): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + tags = ["template1", "template2"] + mock_get_forc_templates.side_effect = Exception("Simulated exception") + + # Act + result = template.cross_check_forc_image(tags) + + # Assert + mock_get_forc_templates.assert_called_once() + self.assertFalse(result) + mock_logger_exception.assert_called_once_with( + "Could not get templates from FORC." 
+ ) + + @patch( + "simple_vm_client.forc_connector.template.template.Template._update_forc_allowed" + ) + def test_process_template_metadata(self, mock_update_forc_allowed): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + metadata = self.get_metadata_example() + + # Act + template._process_template_metadata(metadata) + + # Assert + mock_update_forc_allowed.assert_called_once_with(metadata) + self.assertEqual( + template._loaded_resenv_metadata[metadata.template_name], metadata + ) + + @patch( + "simple_vm_client.forc_connector.template.template.Template._update_forc_allowed" + ) + def test_process_template_metadata_existing_template( + self, mock_update_forc_allowed + ): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + existing_metadata = self.get_metadata_example() + new_metadata = self.get_metadata_example() + new_metadata.port = 9000 + template._loaded_resenv_metadata[ + existing_metadata.template_name + ] = existing_metadata + # Act + template._process_template_metadata(new_metadata) + + # Assert + mock_update_forc_allowed.assert_called_once_with(new_metadata) + self.assertEqual( + template._loaded_resenv_metadata[existing_metadata.template_name], + new_metadata, + ) + + +if __name__ == "__main__": + unittest.main() diff --git a/simple_vm_client/openstack_connector/openstack_connector.py b/simple_vm_client/openstack_connector/openstack_connector.py index a937798..04c793c 100644 --- a/simple_vm_client/openstack_connector/openstack_connector.py +++ b/simple_vm_client/openstack_connector/openstack_connector.py @@ -897,23 +897,23 @@ def get_or_create_research_environment_security_group( if not resenv_metadata.needs_forc_support: return None logger.info( - f"Check if Security Group for resenv - {resenv_metadata.security_group_name} exists... 
" + f"Check if Security Group for resenv - {resenv_metadata.securitygroup_name} exists... " ) sec = self.openstack_connection.get_security_group( - name_or_id=resenv_metadata.security_group_name + name_or_id=resenv_metadata.securitygroup_name ) if sec: logger.info( - f"Security group {resenv_metadata.security_group_name} already exists." + f"Security group {resenv_metadata.securitygroup_name} already exists." ) return sec["id"] logger.info( - f"No security Group for {resenv_metadata.security_group_name} exists. Creating.. " + f"No security Group for {resenv_metadata.securitygroup_name} exists. Creating.. " ) new_security_group = self.openstack_connection.create_security_group( - name=resenv_metadata.security_group_name, description=resenv_metadata.name + name=resenv_metadata.securitygroup_name, description=resenv_metadata.name ) self.openstack_connection.network.create_security_group_rule( direction=resenv_metadata.direction, From 19cc25d7eb42569cf92b0bf513ff62de605015ed Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 21 Dec 2023 15:39:54 +0100 Subject: [PATCH 22/39] finished template tests --- .../forc_connector/template/test_templates.py | 135 ++++++++++++++++++ 1 file changed, 135 insertions(+) diff --git a/simple_vm_client/forc_connector/template/test_templates.py b/simple_vm_client/forc_connector/template/test_templates.py index d8dabbf..1fc2dca 100644 --- a/simple_vm_client/forc_connector/template/test_templates.py +++ b/simple_vm_client/forc_connector/template/test_templates.py @@ -751,6 +751,141 @@ def test_process_template_metadata_existing_template( new_metadata, ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._load_resenv_metadata" + ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._process_template_metadata" + ) + @patch("simple_vm_client.forc_connector.template.template.logger.exception") + def test_load_and_update_resenv_metadata( + self, + mock_logger_exception, + mock_process_template_metadata, 
+ mock_load_resenv_metadata, + ): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + # Mocking the _load_resenv_metadata method to return a list of ResearchEnvironmentMetadata instances + mock_metadata1 = self.get_metadata_example() + mock_metadata2 = self.get_metadata_example() + mock_load_resenv_metadata.return_value = [mock_metadata1, mock_metadata2] + + # Mocking the _process_template_metadata method to raise an exception for one of the metadata instances + mock_exception = Exception("Failed to parse Metadata yml") + mock_process_template_metadata.side_effect = [None, mock_exception] + + # Act + template._load_and_update_resenv_metadata() + + # Assert + mock_load_resenv_metadata.assert_called_once() # Check if _load_resenv_metadata was called + mock_process_template_metadata.assert_has_calls( + [unittest.mock.call(mock_metadata1), unittest.mock.call(mock_metadata2)] + ) # Check if _process_template_metadata was called for each metadata instance + mock_logger_exception.assert_called_once_with( + f"Failed to parse Metadata yml: {mock_metadata2}\n{mock_exception}" + ) # Check if logger.exception was called for the exception case + + @patch( + "simple_vm_client.forc_connector.template.template.Template._download_and_extract_playbooks" + ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._copy_resenvs_templates" + ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._update_loaded_templates" + ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._install_ansible_galaxy_requirements" + ) + @patch( + "simple_vm_client.forc_connector.template.template.Template._load_and_update_resenv_metadata" + ) + @patch("simple_vm_client.forc_connector.template.template.logger.error") + @patch("simple_vm_client.forc_connector.template.template.logger.info") + def test_update_playbooks( + self, + mock_logger_info, + 
mock_logger_error, + mock_load_and_update_resenv_metadata, + mock_install_ansible_galaxy_requirements, + mock_update_loaded_templates, + mock_copy_resenvs_templates, + mock_download_and_extract_playbooks, + ): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + + # Act + template.update_playbooks() + + # Assert + mock_logger_error.assert_not_called() # Check if logger.error was not called when GITHUB_PLAYBOOKS_REPO is not None + mock_download_and_extract_playbooks.assert_called_once() # Check if _download_and_extract_playbooks was called + mock_copy_resenvs_templates.assert_called_once() # Check if _copy_resenvs_templates was called + mock_update_loaded_templates.assert_called_once() # Check if _update_loaded_templates was called + mock_install_ansible_galaxy_requirements.assert_called_once() # Check if _install_ansible_galaxy_requirements was called + mock_load_and_update_resenv_metadata.assert_called_once() # Check if _load_and_update_resenv_metadata was called + mock_logger_info.assert_any_call( + f"Loaded Template Names: {template._all_templates}" + ) # Check if logger.info was called + + @patch( + "simple_vm_client.forc_connector.template.template.Template.update_playbooks" + ) + def test_init(self, mock_update_playbooks): + # Arrange + github_playbook_repo = "https://github.com/playbooks" + forc_url = "https://forc.example.com/" + forc_api_key = "your-api-key" + + # Act + instance = Template(github_playbook_repo, forc_url, forc_api_key) + + # Assert + self.assertEqual(instance.GITHUB_PLAYBOOKS_REPO, github_playbook_repo) + self.assertEqual(instance.FORC_URL, forc_url) + self.assertEqual(instance.FORC_API_KEY, forc_api_key) + self.assertEqual(instance.TEMPLATES_URL, f"{forc_url}templates") + self.assertEqual(instance.BACKENDS_URL, f"{forc_url}backends") + self.assertEqual(instance.BACKENDS_BY_OWNER_URL, f"{forc_url}backends/byOwner") + self.assertEqual( + 
instance.BACKENDS_BY_TEMPLATE_URL, f"{forc_url}backends/byTemplate" + ) + self.assertEqual(instance._forc_allowed, {}) + self.assertEqual(instance._all_templates, [CONDA]) + self.assertEqual(instance._loaded_resenv_metadata, {}) + self.assertEqual(instance._allowed_forc_templates, []) + mock_update_playbooks.assert_called_once() + + def test_loaded_research_env_metadata_property(self): + # Arrange + template = self.init_template( + github_playbook_repo=TestTemplate.GITHUB_REPO_STAGING, + forc_url=TestTemplate.FORC_URL, + ) + mock_metadata1 = self.get_metadata_example() + mock_metadata2 = self.get_metadata_example() + + # Act + template._loaded_resenv_metadata = { + "template1": mock_metadata1, + "template2": mock_metadata2, + } + result = template.loaded_research_env_metadata + + # Assert + self.assertEqual( + result, {"template1": mock_metadata1, "template2": mock_metadata2} + ) + if __name__ == "__main__": unittest.main() From 76557b5bab9e1ea1f4c96ae8ada6dd45c5de314c Mon Sep 17 00:00:00 2001 From: dweinholz Date: Fri, 22 Dec 2023 12:08:58 +0100 Subject: [PATCH 23/39] fix import --- simple_vm_client/forc_connector/playbook/playbook.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/simple_vm_client/forc_connector/playbook/playbook.py b/simple_vm_client/forc_connector/playbook/playbook.py index e2f3fe4..bb31dbc 100644 --- a/simple_vm_client/forc_connector/playbook/playbook.py +++ b/simple_vm_client/forc_connector/playbook/playbook.py @@ -7,6 +7,7 @@ import redis import ruamel.yaml +from simple_vm_client.forc_connector.template.template import Template from simple_vm_client.ttypes import CondaPackage from simple_vm_client.util.logger import setup_custom_logger from simple_vm_client.util.state_enums import VmTaskStates @@ -52,7 +53,6 @@ def __init__( self.research_environment_template = research_environment_template self.base_url = base_url # init temporary directories and mandatory generic files - from forc_connector.template.template import 
Template self.playbooks_dir: str = Template.get_playbook_dir() self.directory: TemporaryDirectory = TemporaryDirectory( From bd4c0b75e18d5ec65cfc2cb261a863cfa291df73 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Fri, 22 Dec 2023 12:13:19 +0100 Subject: [PATCH 24/39] fixing imports after thrift compiling not needed anymoore for docker image --- Makefile | 1 - simple_vm_client/VirtualMachineService.py | 26 ++++++++++++++++++++++- simple_vm_client/__init__.py | 1 + 3 files changed, 26 insertions(+), 2 deletions(-) diff --git a/Makefile b/Makefile index afa7f48..8fafcf2 100644 --- a/Makefile +++ b/Makefile @@ -18,7 +18,6 @@ thrift_py: ## Builds python code from thrift file thrift --gen py portal_client.thrift cp -a gen-py/VirtualMachineService/. simple_vm_client rm -rf gen-py - @echo Remember to fix the imports: for pip relative imports are needed, for others absolute imports dev-build: ## Build and Start the docker-compose.dev.yml docker-compose -f docker-compose.dev.yml up --build diff --git a/simple_vm_client/VirtualMachineService.py b/simple_vm_client/VirtualMachineService.py index 2ad7457..8a8fe31 100644 --- a/simple_vm_client/VirtualMachineService.py +++ b/simple_vm_client/VirtualMachineService.py @@ -13,7 +13,7 @@ from thrift.transport import TTransport from thrift.TRecursive import fix_spec -from simple_vm_client.ttypes import * +from .ttypes import * all_structs = [] @@ -1344,6 +1344,8 @@ def recv_delete_security_group_rule(self): iprot.readMessageEnd() if result.e is not None: raise result.e + if result.f is not None: + raise result.f return def delete_server(self, openstack_id): @@ -3980,6 +3982,9 @@ def process_delete_security_group_rule(self, seqid, iprot, oprot): except SecurityGroupRuleNotFoundException as e: msg_type = TMessageType.REPLY result.e = e + except DefaultException as f: + msg_type = TMessageType.REPLY + result.f = f except TApplicationException as ex: logging.exception("TApplication exception in handler") msg_type = TMessageType.EXCEPTION 
@@ -8484,14 +8489,17 @@ class delete_security_group_rule_result(object): """ Attributes: - e + - f """ def __init__( self, e=None, + f=None, ): self.e = e + self.f = f def read(self, iprot): if ( @@ -8511,6 +8519,11 @@ def read(self, iprot): self.e = SecurityGroupRuleNotFoundException.read(iprot) else: iprot.skip(ftype) + elif fid == 2: + if ftype == TType.STRUCT: + self.f = DefaultException.read(iprot) + else: + iprot.skip(ftype) else: iprot.skip(ftype) iprot.readFieldEnd() @@ -8527,6 +8540,10 @@ def write(self, oprot): oprot.writeFieldBegin("e", TType.STRUCT, 1) self.e.write(oprot) oprot.writeFieldEnd() + if self.f is not None: + oprot.writeFieldBegin("f", TType.STRUCT, 2) + self.f.write(oprot) + oprot.writeFieldEnd() oprot.writeFieldStop() oprot.writeStructEnd() @@ -8554,6 +8571,13 @@ def __ne__(self, other): [SecurityGroupRuleNotFoundException, None], None, ), # 1 + ( + 2, + TType.STRUCT, + "f", + [DefaultException, None], + None, + ), # 2 ) diff --git a/simple_vm_client/__init__.py b/simple_vm_client/__init__.py index e69de29..ba9c185 100644 --- a/simple_vm_client/__init__.py +++ b/simple_vm_client/__init__.py @@ -0,0 +1 @@ +__all__ = ["ttypes", "constants", "VirtualMachineService"] From 2d6555836dd65736bfca96eb24655761c240e069 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Fri, 22 Dec 2023 12:30:38 +0100 Subject: [PATCH 25/39] update test --- .github/workflows/coverage.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index bce3cc4..bdea76c 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -24,7 +24,7 @@ jobs: - name: Build coverage file run: | - pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client/openstack_connector --cov=simple_vm_client/bibigrid_connector -cov=simple_vm_client/util --cov=simple_vm_client/forc_connector | tee pytest-coverage.txt + pytest --junitxml=pytest.xml 
--cov-report=term-missing:skip-covered --cov=simple_vm_client/openstack_connector --cov=simple_vm_client/bibigrid_connector --cov=simple_vm_client/util --cov=simple_vm_client/forc_connector | tee pytest-coverage.txt - name: Pytest coverage comment uses: MishaKav/pytest-coverage-comment@main From 5245bd07dc37ec829d467de30a845681176103a7 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 2 Jan 2024 11:25:25 +0100 Subject: [PATCH 26/39] adding unit test for playbook class --- .../forc_connector/playbook/playbook.py | 6 +- .../forc_connector/playbook/test_playbook.py | 205 ++++++++++++++++++ simple_vm_client/test_openstack_connector.py | 1 - 3 files changed, 210 insertions(+), 2 deletions(-) create mode 100644 simple_vm_client/forc_connector/playbook/test_playbook.py diff --git a/simple_vm_client/forc_connector/playbook/playbook.py b/simple_vm_client/forc_connector/playbook/playbook.py index bb31dbc..4646e62 100644 --- a/simple_vm_client/forc_connector/playbook/playbook.py +++ b/simple_vm_client/forc_connector/playbook/playbook.py @@ -55,12 +55,14 @@ def __init__( # init temporary directories and mandatory generic files self.playbooks_dir: str = Template.get_playbook_dir() + logger.info(self.playbooks_dir) self.directory: TemporaryDirectory = TemporaryDirectory( dir=f"{self.playbooks_dir}" ) self.private_key = NamedTemporaryFile( mode="w+", dir=self.directory.name, delete=False, prefix="private_key_" ) + logger.info(self.private_key) self.private_key.write(osi_private_key) self.private_key.close() @@ -308,7 +310,7 @@ def run_it(self) -> None: def check_status(self, openstack_id: str) -> int: logger.info(f"Check Status Playbook for VM {openstack_id}") done = self.process.poll() - logger.info(f" Status Playbook for VM {openstack_id}: {done}") + logger.info(f"Status Playbook for VM {openstack_id}: {done}") if done is None: logger.info( @@ -325,6 +327,7 @@ def check_status(self, openstack_id: str) -> int: self.redis.hset( openstack_id, "status", 
VmTaskStates.PLAYBOOK_SUCCESSFUL.value ) + self.returncode = self.process.returncode self.process.wait() return done @@ -332,6 +335,7 @@ def check_status(self, openstack_id: str) -> int: def get_logs(self) -> tuple[int, str, str]: self.log_file_stdout.seek(0, 0) lines_stdout = self.log_file_stdout.readlines() + logger.info(lines_stdout) for line in lines_stdout: self.stdout += line self.log_file_stderr.seek(0, 0) diff --git a/simple_vm_client/forc_connector/playbook/test_playbook.py b/simple_vm_client/forc_connector/playbook/test_playbook.py new file mode 100644 index 0000000..d3ddf39 --- /dev/null +++ b/simple_vm_client/forc_connector/playbook/test_playbook.py @@ -0,0 +1,205 @@ +import unittest +from unittest.mock import MagicMock, patch + +import redis + +from simple_vm_client.forc_connector.playbook.playbook import Playbook +from simple_vm_client.ttypes import CondaPackage +from simple_vm_client.util.state_enums import VmTaskStates + +DEFAULT_IP = "192.168.0.4" +DEFAULT_PORT = 9090 +DEFAULT_RESEARCH_ENVIRONMENT_TEMPLATE = "vscode" +DEFAULT_RESEARCH_ENVIRONMENT_VERSION = "v3" +DEFAULT_CONDA_PACKAGES = [ + CondaPackage(name="conda1", version="1.0.0"), + CondaPackage(name="conda2", version="2.0.0"), +] +DEFAULT_APT_PACKAGES = ["curl", "mosh"] +DEFAULT_PRIVATE_KEY = "a04f5f781e4b492d812c1dd3c7cb951f" +DEFAULT_PUBLIC_KEY = "public_key" +DEFAULT_CLOUD_SITE = "Bielefeld" +DEFAULT_BASE_URL = "https://localhost.base_url" +DEFAULT_POOL = MagicMock(spec=redis.ConnectionPool) + + +class TestPlaybook(unittest.TestCase): + def init_playbook(self): + with patch.object(Playbook, "__init__", lambda x, y, z: None): + playbook = Playbook(None, None) + return playbook + + @patch("simple_vm_client.forc_connector.playbook.playbook.TemporaryDirectory") + @patch("simple_vm_client.forc_connector.playbook.playbook.redis.Redis") + def test_cleanup(self, mock_redis, mock_temporary_directory): + # Arrange + openstack_id = "your_openstack_id" + mock_temporary_directory_instance = MagicMock() 
+ mock_temporary_directory.return_value = mock_temporary_directory_instance + mock_redis_instance = MagicMock(spec=redis.StrictRedis) + mock_redis_instance.delete.return_value = None + mock_redis.return_value = mock_redis_instance + + instance = self.init_playbook() + instance.redis = mock_redis_instance + instance.directory = mock_temporary_directory_instance + + # Act + instance.cleanup(openstack_id) + + # Assert + mock_temporary_directory_instance.cleanup.assert_called_once() + mock_redis_instance.delete.assert_called_once_with(openstack_id) + + @patch("simple_vm_client.forc_connector.playbook.playbook.NamedTemporaryFile") + @patch("simple_vm_client.forc_connector.playbook.playbook.NamedTemporaryFile") + def test_get_logs(self, mock_log_file_stdout, mock_log_file_stderr): + # Arrange + instance = self.init_playbook() + + # Configure mock behavior for log files + stdout_content = "This is a sample stdout log." + stderr_content = "This is a sample stderr log." + + # Mocking log files + mock_log_file_stdout_instance = MagicMock() + mock_log_file_stdout_instance.readlines.return_value = ( + stdout_content.splitlines() + ) + mock_log_file_stdout.return_value = mock_log_file_stdout_instance + + mock_log_file_stderr_instance = MagicMock() + mock_log_file_stderr_instance.readlines.return_value = ( + stderr_content.splitlines() + ) + mock_log_file_stderr.return_value = mock_log_file_stderr_instance + instance.stderr = "" + instance.stdout = "" + instance.log_file_stderr = mock_log_file_stderr_instance + instance.log_file_stdout = mock_log_file_stdout_instance + instance.returncode = 0 + + # Act + returncode, stdout, stderr = instance.get_logs() + + # Assert + mock_log_file_stdout_instance.seek.assert_called_with(0, 0) + mock_log_file_stdout_instance.readlines.assert_called_with() + + mock_log_file_stderr_instance.seek.assert_called_with(0, 0) + mock_log_file_stderr_instance.readlines.assert_called_with() + self.assertEqual(returncode, instance.returncode) + 
self.assertEqual(stdout, stdout_content) + self.assertEqual(stderr, stderr_content) + + @patch("simple_vm_client.forc_connector.playbook.playbook.Playbook.cleanup") + @patch("simple_vm_client.forc_connector.playbook.playbook.Playbook.get_logs") + @patch("simple_vm_client.forc_connector.playbook.playbook.redis.Redis") + @patch("simple_vm_client.forc_connector.playbook.playbook.subprocess.Popen") + def test_stop(self, mock_popen, mock_redis, mock_get_logs, mock_cleanup): + # Arrange + instance = self.init_playbook() + openstack_id = "your_openstack_id" + mock_process = MagicMock() + mock_popen.return_value = mock_process + mock_get_logs.return_value = 0, "Stderr", "Stdout" + instance.redis = mock_redis + instance.directory = MagicMock() + instance.process = mock_process + + # Act + instance.stop(openstack_id) + + # Assert + mock_process.terminate.assert_called_once() + mock_get_logs.assert_called_once() + mock_redis.hset.assert_called_once_with( + name=f"pb_logs_{openstack_id}", + mapping={ + "returncode": mock_get_logs.return_value[0], + "stdout": mock_get_logs.return_value[1], + "stderr": mock_get_logs.return_value[2], + }, + ) + mock_cleanup.assert_called_once_with(openstack_id) + + @patch("simple_vm_client.forc_connector.playbook.playbook.logger") + def test_check_status_in_progress(self, mock_logger): + # Arrange + openstack_id = "your_openstack_id" + + instance = self.init_playbook() + + mock_process = MagicMock() + mock_process.poll.return_value = None + + instance.process = mock_process + + # Act + result = instance.check_status(openstack_id) + + # Assert + mock_logger.info.assert_any_call(f"Check Status Playbook for VM {openstack_id}") + mock_logger.info.assert_any_call(f"Status Playbook for VM {openstack_id}: None") + mock_logger.info.assert_any_call( + f"Playbook for (openstack_id) {openstack_id} still in progress." 
+ ) + self.assertEqual(result, 3) + + @patch("simple_vm_client.forc_connector.playbook.playbook.logger") + def test_check_status_failed(self, mock_logger): + # Arrange + openstack_id = "your_openstack_id" + + instance = self.init_playbook() + mock_process = MagicMock() + mock_process.poll.return_value = 1 + mock_redis_instance = MagicMock(spec=redis.StrictRedis) + + instance.redis = mock_redis_instance + + instance.process = mock_process + + # Act + result = instance.check_status(openstack_id) + + # Assert + + mock_logger.info.assert_any_call(f"Check Status Playbook for VM {openstack_id}") + mock_logger.info.assert_any_call(f"Status Playbook for VM {openstack_id}: 1") + mock_redis_instance.hset.assert_called_once_with( + openstack_id, "status", VmTaskStates.PLAYBOOK_FAILED.value + ) + mock_logger.info.assert_any_call( + f"Playbook for (openstack_id) {openstack_id} has failed." + ) + self.assertEqual(result, 1) + + @patch("simple_vm_client.forc_connector.playbook.playbook.logger") + def test_check_status_success(self, mock_logger): + # Arrange + openstack_id = "your_openstack_id" + + instance = self.init_playbook() + mock_process = MagicMock() + mock_process.poll.return_value = 0 + mock_redis_instance = MagicMock(spec=redis.StrictRedis) + + instance.redis = mock_redis_instance + + instance.process = mock_process + + # Act + result = instance.check_status(openstack_id) + + # Assert + + mock_logger.info.assert_any_call(f"Check Status Playbook for VM {openstack_id}") + mock_logger.info.assert_any_call(f"Status Playbook for VM {openstack_id}: 0") + mock_redis_instance.hset.assert_called_once_with( + openstack_id, "status", VmTaskStates.PLAYBOOK_SUCCESSFUL.value + ) + mock_logger.info.assert_any_call( + f"Playbook for (openstack_id) {openstack_id} is successful." 
+ ) + self.assertEqual(result, 0) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index a1af8f8..604f62f 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -409,7 +409,6 @@ def test_create_server(self, mock_logger_error, mock_logger_info): # Mock the create_server method to return a fake server object fake_server = Server(**{"id": "fake_server_id", "name": name}) - print(f"test : {fake_server}") self.mock_openstack_connection.create_server.return_value = fake_server # Call the create_server method From 08cc142f02336d143927c7799ee6a68111327f7e Mon Sep 17 00:00:00 2001 From: dweinholz Date: Tue, 2 Jan 2024 15:50:47 +0100 Subject: [PATCH 27/39] feat(UnitTest):finished playbook.yml tests --- .../forc_connector/playbook/playbook.py | 85 +-- .../forc_connector/playbook/test_playbook.py | 524 +++++++++++++++++- 2 files changed, 567 insertions(+), 42 deletions(-) diff --git a/simple_vm_client/forc_connector/playbook/playbook.py b/simple_vm_client/forc_connector/playbook/playbook.py index 4646e62..1e4c5c6 100644 --- a/simple_vm_client/forc_connector/playbook/playbook.py +++ b/simple_vm_client/forc_connector/playbook/playbook.py @@ -55,14 +55,12 @@ def __init__( # init temporary directories and mandatory generic files self.playbooks_dir: str = Template.get_playbook_dir() - logger.info(self.playbooks_dir) self.directory: TemporaryDirectory = TemporaryDirectory( dir=f"{self.playbooks_dir}" ) self.private_key = NamedTemporaryFile( mode="w+", dir=self.directory.name, delete=False, prefix="private_key_" ) - logger.info(self.private_key) self.private_key.write(osi_private_key) self.private_key.close() @@ -163,53 +161,60 @@ def copy_and_init_research_environment(self) -> None: data[self.research_environment_template + "_vars"][ "base_url" ] = self.base_url - with open( - self.directory.name + playbook_var_yml, mode="w" - ) as variables: - self.yaml_exec.dump(data, 
variables) + with open(self.directory.name + playbook_var_yml, mode="w") as variables: + self.yaml_exec.dump(data, variables) self.add_to_playbook_lists( playbook_name_local, self.research_environment_template ) - except shutil.Error as e: - logger.exception(e) - self.add_tasks_only(playbook_name_local) - except IOError as e: - logger.exception(e) + except (shutil.Error, IOError): + logger.exception("Could not copy research environment template data") self.add_tasks_only(playbook_name_local) def copy_and_init_apt_packages(self) -> None: if not self.apt_packages: return - site_specific_yml = f"/{OPTIONAL}{'-' + self.cloud_site}.yml" + + site_specific_yml = f"{OPTIONAL}{'-' + self.cloud_site}.yml" playbook_name_local = OPTIONAL + if os.path.isfile(self.playbooks_dir + site_specific_yml): playbook_name_local = OPTIONAL + "-" + self.cloud_site - playbook_yml = f"/{playbook_name_local}.yml" - playbook_var_yml = f"/{OPTIONAL}_vars_file.yml" + + playbook_yml = f"{playbook_name_local}.yml" + playbook_var_yml = f"{OPTIONAL}_vars_file.yml" + try: - shutil.copy(self.playbooks_dir + playbook_yml, self.directory.name) + full_playbook_path = os.path.join(self.playbooks_dir, playbook_yml) + # Copy playbook YAML + shutil.copy(full_playbook_path, self.directory.name) + try: - shutil.copy(self.playbooks_dir + playbook_var_yml, self.directory.name) + full_vars_path = os.path.join(self.playbooks_dir, playbook_var_yml) + # Copy playbook vars YAML + shutil.copy(full_vars_path, self.directory.name) + + # Update apt_packages in playbook vars YAML with open( - self.directory.name + playbook_var_yml, mode="r" + os.path.join(self.directory.name, playbook_var_yml), mode="r" ) as variables: data = self.yaml_exec.load(variables) data["apt_packages"] = self.apt_packages - with open( - self.directory.name + playbook_var_yml, mode="w" - ) as variables: - self.yaml_exec.dump(data, variables) - self.add_to_playbook_lists(playbook_name_local, OPTIONAL) - except shutil.Error as e: - 
logger.exception(e) - self.add_tasks_only(playbook_name_local) - except IOError as e: - logger.exception(e) + + # Save updated playbook vars YAML + with open( + os.path.join(self.directory.name, playbook_var_yml), mode="w" + ) as variables: + self.yaml_exec.dump(data, variables) + + # Add to playbook lists + self.add_to_playbook_lists(playbook_name_local, OPTIONAL) + + except (shutil.Error, IOError): + logger.exception("Could not copy apt packages") self.add_tasks_only(playbook_name_local) - except shutil.Error as e: - logger.exception(e) - except IOError as e: - logger.exception(e) + + except (shutil.Error, IOError): + logger.exception("Could not copy apt packages") def copy_and_init_conda_packages(self) -> None: if not self.conda_packages: @@ -240,16 +245,13 @@ def copy_and_init_conda_packages(self) -> None: } ) data[CONDA + "_vars"]["packages"] = p_dict - with open( - self.directory.name + playbook_var_yml, mode="w" - ) as variables: - self.yaml_exec.dump(data, variables) - self.add_to_playbook_lists(playbook_name_local, CONDA) - except shutil.Error as e: - logger.exception(e) - self.add_tasks_only(playbook_name_local) - except IOError as e: - logger.exception(e) + with open(self.directory.name + playbook_var_yml, mode="w") as variables: + self.yaml_exec.dump(data, variables) + self.add_to_playbook_lists(playbook_name_local, CONDA) + except (shutil.Error, IOError): + logger.exception( + f"Could not open - {self.directory.name + playbook_var_yml}" + ) self.add_tasks_only(playbook_name_local) def add_to_playbook_lists( @@ -297,6 +299,7 @@ def add_always_tasks_only(self, playbook_name: str) -> None: ) def run_it(self) -> None: + logger.info("RUN ITTT") command_string = f"/usr/local/bin/ansible-playbook -v -i {self.inventory.name} {self.directory.name}/{self.playbook_exec_name}" command_string = shlex.split(command_string) # type: ignore logger.info(f"Run Playbook for {self.playbook_exec_name} - [{command_string}]") diff --git 
a/simple_vm_client/forc_connector/playbook/test_playbook.py b/simple_vm_client/forc_connector/playbook/test_playbook.py index d3ddf39..f04a670 100644 --- a/simple_vm_client/forc_connector/playbook/test_playbook.py +++ b/simple_vm_client/forc_connector/playbook/test_playbook.py @@ -1,9 +1,12 @@ +import os import unittest +from tempfile import TemporaryDirectory from unittest.mock import MagicMock, patch import redis -from simple_vm_client.forc_connector.playbook.playbook import Playbook +from simple_vm_client.forc_connector.playbook.playbook import CONDA, OPTIONAL, Playbook +from simple_vm_client.forc_connector.template.template import Template from simple_vm_client.ttypes import CondaPackage from simple_vm_client.util.state_enums import VmTaskStates @@ -27,6 +30,20 @@ class TestPlaybook(unittest.TestCase): def init_playbook(self): with patch.object(Playbook, "__init__", lambda x, y, z: None): playbook = Playbook(None, None) + playbook.vars_files = [] + playbook.tasks = [] + playbook.playbooks_dir = Template.get_playbook_dir() + playbook.returncode: int = -1 + playbook.stdout: str = "" + playbook.stderr: str = "" + playbook.conda_packages = [] + playbook.apt_packages = [] + playbook.always_tasks = [] + playbook.research_environment_template = None + playbook.cloud_site = DEFAULT_CLOUD_SITE + playbook.playbook_exec_name: str = "generic_playbook.yml" + + playbook.directory = TemporaryDirectory(dir=f"{playbook.playbooks_dir}") return playbook @patch("simple_vm_client.forc_connector.playbook.playbook.TemporaryDirectory") @@ -203,3 +220,508 @@ def test_check_status_success(self, mock_logger): f"Playbook for (openstack_id) {openstack_id} is successful." 
) self.assertEqual(result, 0) + + @patch("simple_vm_client.forc_connector.playbook.playbook.logger") + @patch("simple_vm_client.forc_connector.playbook.playbook.subprocess.Popen") + def test_run_it(self, mock_popen, mock_logger): + # Arrange + playbook = self.init_playbook() + inventory = MagicMock() + directory = MagicMock() + playbook.inventory = inventory + playbook.directory = directory + playbook.log_file_stderr = MagicMock() + playbook.log_file_stdout = MagicMock() + + playbook.inventory.name = "inventory_name" + playbook.directory.name = "directory_name" + playbook.playbook_exec_name = "playbook_exec_name" + mock_process = MagicMock() + mock_popen.return_value = mock_process + + # Act + playbook.run_it() + + # Assert + mock_logger.info.assert_called_with( + f"Run Playbook for {playbook.playbook_exec_name} - " + f"[['/usr/local/bin/ansible-playbook', '-v', '-i', 'inventory_name', 'directory_name/playbook_exec_name']]" + ) + mock_popen.assert_called_once_with( + [ + "/usr/local/bin/ansible-playbook", + "-v", + "-i", + "inventory_name", + "directory_name/playbook_exec_name", + ], + stdout=playbook.log_file_stdout, + stderr=playbook.log_file_stderr, + universal_newlines=True, + ) + self.assertEqual(playbook.process, mock_process) + + def test_add_always_tasks_only(self): + # Arrange + instance = self.init_playbook() + playbook_name = "example_playbook" + instance.always_tasks = [] + + # Act + instance.add_always_tasks_only(playbook_name) + + # Assert + expected_task = { + "name": f"Running {playbook_name} tasks", + "import_tasks": f"{playbook_name}.yml", + } + self.assertIn(expected_task, instance.always_tasks) + + def test_add_tasks_only(self): + # Arrange + instance = self.init_playbook() + playbook_name = "example_playbook" + instance.tasks = [] + + # Act + instance.add_tasks_only(playbook_name) + + # Assert + expected_task = { + "name": f"Running {playbook_name} tasks", + "import_tasks": f"{playbook_name}.yml", + } + self.assertIn(expected_task, 
instance.tasks) + + def test_add_to_playbook_always_lists(self): + # Arrange + instance = self.init_playbook() + playbook_name = "example_playbook" + + instance.vars_files = [] + instance.always_tasks = [] + + # Act + instance.add_to_playbook_always_lists(playbook_name) + + # Assert + expected_vars_file = f"{playbook_name}_vars_file.yml" + expected_always_task = { + "name": f"Running {playbook_name} tasks", + "import_tasks": f"{playbook_name}.yml", + } + self.assertIn(expected_vars_file, instance.vars_files) + self.assertIn(expected_always_task, instance.always_tasks) + + @patch("simple_vm_client.forc_connector.playbook.playbook.logger") + def test_add_to_playbook_lists(self, mock_logger): + # Arrange + instance = self.init_playbook() + playbook_name_local = "example_local_playbook" + playbook_name = "example_playbook" + instance.vars_files = [] + instance.tasks = [] + + # Act + instance.add_to_playbook_lists(playbook_name_local, playbook_name) + + # Assert + expected_vars_file = f"{playbook_name}_vars_file.yml" + expected_task = { + "name": f"Running {playbook_name_local} tasks", + "import_tasks": f"{playbook_name_local}.yml", + } + mock_logger.info.assert_called_once_with( + "Added playbook: " + + playbook_name_local + + ".yml" + + ", vars file: " + + playbook_name + + "_vars_file.yml" + ) + self.assertIn(expected_vars_file, instance.vars_files) + self.assertIn(expected_task, instance.tasks) + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + @patch("simple_vm_client.forc_connector.playbook.playbook.open") + @patch("simple_vm_client.forc_connector.playbook.playbook.os.path.isfile") + def test_copy_and_init_conda_packages(self, mock_isfile, mock_open, mock_copytree): + # Arrange + mock_yaml_exec = MagicMock() + instance = self.init_playbook() + instance.yaml_exec = mock_yaml_exec + instance.conda_packages = DEFAULT_CONDA_PACKAGES + instance.add_to_playbook_lists = MagicMock() + instance.add_tasks_only = MagicMock() + + mock_tempdir = 
MagicMock() + mock_tempdir.name = "/tmp/test_temp_dir" + instance.playbooks_dir = Template.get_playbook_dir() + + instance.directory = mock_tempdir + instance.cloud_site = "your_cloud_site" + + # Set up mock for os.path.isfile + mock_isfile.return_value = True + + # Act + instance.copy_and_init_conda_packages() + + # Assert + mock_copytree.assert_called_once_with( + f"{instance.playbooks_dir}/{CONDA}", mock_tempdir.name, dirs_exist_ok=True + ) + + mock_open.assert_any_call( + f"{mock_tempdir.name}/{CONDA}_vars_file.yml", mode="r" + ) + mock_open.assert_any_call( + f"{mock_tempdir.name}/{CONDA}_vars_file.yml", mode="w" + ) + + mock_yaml_exec.load.assert_called_once() + + mock_yaml_exec.dump.assert_called_once() + + instance.add_to_playbook_lists.assert_called_once_with( + CONDA + "-" + instance.cloud_site, CONDA + ) + + # Check that add_tasks_only is not called + instance.add_tasks_only.assert_not_called() + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + def test_copy_and_init_conda_packages_no_conda_packages(self, mock_copytree): + playbook = self.init_playbook() + playbook.copy_and_init_conda_packages() + mock_copytree.assert_not_called() + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + @patch("simple_vm_client.forc_connector.playbook.playbook.open") + @patch("simple_vm_client.forc_connector.playbook.playbook.os.path.isfile") + @patch("simple_vm_client.forc_connector.playbook.playbook.logger.exception") + def test_copy_and_init_conda_packages_error( + self, mock_logger_exception, mock_is_file, mock_open, mock_copytree + ): + # Arrange + mock_yaml_exec = MagicMock() + + mock_is_file.return_value = True + instance = self.init_playbook() + instance.yaml_exec = mock_yaml_exec + instance.conda_packages = DEFAULT_CONDA_PACKAGES + instance.add_to_playbook_lists = MagicMock() + instance.add_tasks_only = MagicMock() + + mock_tempdir = MagicMock() + mock_tempdir.name = "/tmp/test_temp_dir" + 
instance.playbooks_dir = Template.get_playbook_dir() + + instance.directory = mock_tempdir + instance.cloud_site = "your_cloud_site" + + # Set up mock for os.path.isfile + mock_open.side_effect = IOError("Error reading file") + + # Act and Assert + instance.copy_and_init_conda_packages() + playbook_var_yml = f"/{CONDA}_vars_file.yml" + + mock_logger_exception.assert_called_once_with( + f"Could not open - {instance.directory.name + playbook_var_yml}" + ) + + instance.add_to_playbook_lists.assert_not_called() + + instance.add_tasks_only.assert_called_once_with( + CONDA + "-" + instance.cloud_site + ) + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copy") + @patch( + "simple_vm_client.forc_connector.playbook.playbook.os.path.isfile", + return_value=True, + ) + @patch("builtins.open", new_callable=unittest.mock.mock_open) + @patch("simple_vm_client.forc_connector.playbook.playbook.logger.info") + def test_copy_and_init_apt_packages( + self, mock_logger_info, mock_open, mock_isfile, mock_copy + ): + # Arrange + instance = self.init_playbook() # Initialize your class with appropriate values + mock_yaml_exec = MagicMock() + instance.yaml_exec = mock_yaml_exec + + # Mock apt_packages and other necessary attributes + instance.apt_packages = DEFAULT_APT_PACKAGES + + # Act + instance.copy_and_init_apt_packages() + + # Assert + # Add your assertions based on the expected behavior of the method + + # Verify that shutil.copy is called with the correct arguments + playbook_yml = os.path.join( + instance.playbooks_dir, OPTIONAL + "-" + instance.cloud_site + ".yml" + ) + mock_copy.assert_any_call(playbook_yml, instance.directory.name) + playbook_vars_yml = os.path.join( + instance.playbooks_dir, OPTIONAL + "_vars_file.yml" + ) + + mock_copy.assert_any_call(playbook_vars_yml, instance.directory.name) + + target_playbook_vars = os.path.join( + instance.directory.name, OPTIONAL + "_vars_file.yml" + ) + + # Verify that open is called with the correct arguments + 
mock_open.assert_any_call(target_playbook_vars, mode="r") + mock_logger_info.assert_called_once_with( + "Added playbook: " + + OPTIONAL + + "-" + + instance.cloud_site + + ".yml" + + ", vars file: " + + OPTIONAL + + "_vars_file.yml" + ) + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + def test_copy_and_init_apt_packages_no_apt_packages(self, mock_copytree): + playbook = self.init_playbook() + playbook.copy_and_init_apt_packages() + mock_copytree.assert_not_called() + + @patch( + "simple_vm_client.forc_connector.playbook.playbook.shutil.copy", + side_effect=IOError("Error copying file"), + ) + @patch("simple_vm_client.forc_connector.playbook.playbook.logger.exception") + def test_copy_and_init_apt_packages_raises_io_error( + self, mock_logger_exception, mock_copy + ): + # Arrange + obj = self.init_playbook() + obj.apt_packages = DEFAULT_APT_PACKAGES # Set your desired apt_packages + + # Act and Assert + obj.copy_and_init_apt_packages() + mock_logger_exception.assert_called_once_with("Could not copy apt packages") + + @patch( + "simple_vm_client.forc_connector.playbook.playbook.open", + side_effect=IOError("Error copying file"), + ) + @patch("simple_vm_client.forc_connector.playbook.playbook.logger.exception") + def test_copy_and_init_apt_packages_raises_open_error( + self, mock_logger_exception, mock_copy + ): + # Arrange + obj = self.init_playbook() + obj.apt_packages = DEFAULT_APT_PACKAGES # Set your desired apt_packages + obj.add_tasks_only = MagicMock() + + # Act and Assert + obj.copy_and_init_apt_packages() + mock_logger_exception.assert_called_once_with("Could not copy apt packages") + obj.add_tasks_only.assert_called_once_with(OPTIONAL) + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + @patch( + "simple_vm_client.forc_connector.playbook.playbook.os.path.isfile", + return_value=True, + ) # Mocking os.path.isfile to return True + @patch("builtins.open", new_callable=unittest.mock.mock_open) + 
@patch("simple_vm_client.forc_connector.playbook.playbook.logger.info") + def test_copy_and_init_research_environment( + self, mock_logger_info, mock_open, mock_isfile, mock_copytree + ): + # Arrange + instance = self.init_playbook() # Create an instance of YourClass + mock_yaml_exec = MagicMock() + instance.yaml_exec = mock_yaml_exec + + # Mock data and methods + instance.research_environment_template = "template_name" + instance.cloud_site = "cloud_site" + instance.research_environment_template_version = "template_version" + instance.create_only_backend = True + instance.base_url = "base_url" + + # Act + instance.copy_and_init_research_environment() + + # Assert + mock_copytree.assert_called_once_with( + f"{instance.playbooks_dir}/template_name", + instance.directory.name, + dirs_exist_ok=True, + ) + mock_isfile.assert_any_call( + f"{instance.directory.name}/template_name-cloud_site.yml" + ) + mock_open.assert_any_call( + f"{instance.directory.name}/template_name_vars_file.yml", mode="r" + ) + mock_open.assert_any_call( + f"{instance.directory.name}/template_name_vars_file.yml", mode="w" + ) + mock_logger_info.assert_called_once_with( + f"Added playbook: {instance.research_environment_template}-{instance.cloud_site}.yml," + f" vars file: {instance.research_environment_template}_vars_file.yml" + ) + + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + def test_copy_and_init_research_environment_no_template(self, mock_copytree): + playbook = self.init_playbook() + playbook.copy_and_init_research_environment() + mock_copytree.assert_not_called() + + @patch( + "simple_vm_client.forc_connector.playbook.playbook.open", + side_effect=IOError("Error copying file"), + ) + @patch("simple_vm_client.forc_connector.playbook.playbook.logger.exception") + @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree") + def test_copy_and_init_research_environment_error( + self, mock_copy, mock_logger_exception, mock_open + ): + # Arrange + 
obj = self.init_playbook()
+ obj.research_environment_template = "template_name"
+ obj.add_tasks_only = MagicMock()
+
+ # Act and Assert
+ obj.copy_and_init_research_environment()
+ mock_logger_exception.assert_called_once_with(
+ "Could not copy research environment template data"
+ )
+ obj.add_tasks_only.assert_called_once_with(obj.research_environment_template)
+
+ @patch("shutil.copytree")
+ @patch(
+ "os.path.isfile", return_value=False
+ ) # Mocking os.path.isfile to return False
+ @patch("shutil.copy")
+ @patch("builtins.open", new_callable=unittest.mock.mock_open)
+ def test_copy_playbooks_and_init(
+ self, mock_open, mock_copy, mock_copytree, mock_isfile
+ ):
+ playbook = self.init_playbook()
+ mock_yaml_exec = MagicMock()
+ playbook.yaml_exec = mock_yaml_exec
+ playbook.copy_and_init_conda_packages = MagicMock()
+ playbook.copy_and_init_apt_packages = MagicMock()
+ playbook.copy_and_init_research_environment = MagicMock()
+ playbook.copy_and_init_change_keys = MagicMock()
+ playbook.copy_playbooks_and_init(public_key=DEFAULT_PUBLIC_KEY)
+
+ playbook.copy_and_init_conda_packages.assert_called_once_with()
+ playbook.copy_and_init_apt_packages.assert_called_once_with()
+ playbook.copy_and_init_research_environment.assert_called_once_with()
+ playbook.copy_and_init_change_keys.assert_called_once_with(
+ public_key=DEFAULT_PUBLIC_KEY
+ )
+
+ # NOTE(review): removed stray no-op expression patch("shutil.copy") — it was missing its "@" decorator prefix and had no effect
+
+ @patch(
+ "os.path.isfile", return_value=True
+ ) # Mocking os.path.isfile to return True
+ @patch("builtins.open", new_callable=unittest.mock.mock_open)
+ @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copy")
+ def test_copy_and_init_change_keys(self, mock_copy, mock_open, mock_isfile):
+ playbook = self.init_playbook()
+ mock_yaml_exec = MagicMock()
+ mock_yaml_exec.load.return_value = {"change_key_vars": {"key": None}}
+ playbook.yaml_exec = mock_yaml_exec
+ playbook.add_to_playbook_always_lists = MagicMock()
+ key_file_mock = MagicMock()
+
+ mock_open.side_effect = 
[key_file_mock, key_file_mock]
+
+ # Act
+ playbook.copy_and_init_change_keys(public_key=DEFAULT_PUBLIC_KEY)
+
+ # Assert
+ expected_copy_calls = [
+ unittest.mock.call(
+ "/path/to/playbooks/change_key.yml", "/path/to/directory"
+ ),
+ unittest.mock.call(
+ "/path/to/playbooks/change_key_vars_file.yml", "/path/to/directory"
+ ),
+ ]
+ # FIXME(review): was mock_copy(expected_copy_calls, any_order=True) — a no-op mock CALL, not an assertion; should be mock_copy.assert_has_calls(...) with the real copied paths
+
+ key_file = playbook.directory.name + "/change_key_vars_file.yml"
+
+ data_ck = {"change_key_vars": {"key": None}}
+ data_ck["change_key_vars"]["key"] = DEFAULT_PUBLIC_KEY.strip('"')
+
+ mock_open.assert_any_call(key_file, mode="r")
+ mock_open.assert_any_call(key_file, mode="w")
+ mock_yaml_exec.load.assert_called_once_with(key_file_mock.__enter__())
+ mock_yaml_exec.dump.assert_called_once_with(data_ck, key_file_mock.__enter__())
+ playbook.add_to_playbook_always_lists.assert_called_once_with("change_key")
+
+ @patch("simple_vm_client.forc_connector.playbook.playbook.shutil.copytree")
+ @patch(
+ "os.path.isfile", return_value=True
+ ) # Mocking os.path.isfile to return True
+ @patch("shutil.copy")
+ @patch("builtins.open", new_callable=unittest.mock.mock_open)
+ @patch(
+ "simple_vm_client.forc_connector.playbook.playbook.Playbook.copy_playbooks_and_init"
+ )
+ @patch("simple_vm_client.forc_connector.playbook.playbook.redis.Redis")
+ def test_init(
+ self,
+ mock_redis,
+ mock_copy_playbooks_and_init,
+ mock_open,
+ mock_copy,
+ mock_isfile,
+ mock_copytree,
+ ):
+ # Act
+ instance = Playbook(
+ ip=DEFAULT_IP,
+ port=DEFAULT_PORT,
+ research_environment_template=DEFAULT_RESEARCH_ENVIRONMENT_TEMPLATE,
+ research_environment_template_version=DEFAULT_RESEARCH_ENVIRONMENT_VERSION,
+ create_only_backend=False,
+ conda_packages=DEFAULT_CONDA_PACKAGES,
+ apt_packages=DEFAULT_APT_PACKAGES,
+ osi_private_key=DEFAULT_PRIVATE_KEY,
+ public_key=DEFAULT_PUBLIC_KEY,
+ pool=DEFAULT_POOL,
+ cloud_site=DEFAULT_CLOUD_SITE,
+ base_url=DEFAULT_BASE_URL,
+ )
+
+ # Assert
+ 
self.assertEqual(instance.cloud_site, DEFAULT_CLOUD_SITE) + self.assertIsNotNone(instance.redis) + self.assertIsNotNone(instance.yaml_exec) + self.assertEqual(instance.conda_packages, DEFAULT_CONDA_PACKAGES) + self.assertEqual(instance.apt_packages, DEFAULT_APT_PACKAGES) + self.assertIsNone(instance.process) + self.assertEqual( + instance.research_environment_template_version, + DEFAULT_RESEARCH_ENVIRONMENT_VERSION, + ) + self.assertEqual(instance.create_only_backend, False) + self.assertEqual(instance.returncode, -1) + self.assertEqual(instance.stdout, "") + self.assertEqual(instance.stderr, "") + self.assertEqual( + instance.research_environment_template, + DEFAULT_RESEARCH_ENVIRONMENT_TEMPLATE, + ) + self.assertEqual(instance.base_url, DEFAULT_BASE_URL) + + mock_copy_playbooks_and_init.assert_called_once() From 73c30806347a673576b6057f3ad1a8c80e7319de Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 3 Jan 2024 08:21:00 +0100 Subject: [PATCH 28/39] feat(UnitTest):added bibigrid connector tests --- .../bibigrid_connector/bibigrid_connector.py | 41 +- .../test_bibigrid_connector.py | 364 ++++++++++++++++++ 2 files changed, 387 insertions(+), 18 deletions(-) create mode 100644 simple_vm_client/bibigrid_connector/test_bibigrid_connector.py diff --git a/simple_vm_client/bibigrid_connector/bibigrid_connector.py b/simple_vm_client/bibigrid_connector/bibigrid_connector.py index 5c0d8d9..547ad33 100644 --- a/simple_vm_client/bibigrid_connector/bibigrid_connector.py +++ b/simple_vm_client/bibigrid_connector/bibigrid_connector.py @@ -54,27 +54,32 @@ def load_config_yml(self, config_file: str) -> None: def get_cluster_status(self, cluster_id: str) -> dict[str, str]: logger.info(f"Get Cluster {cluster_id} status") + headers = {"content-Type": "application/json"} body = {"mode": "openstack"} - request_url = self._BIBIGRID_URL + "info/" + cluster_id - response = requests.get( - url=request_url, - json=body, - headers=headers, - verify=self._PRODUCTION, - ) - 
logger.info(f"Cluster {cluster_id} status: {str(response.content)} ") - json_resp: dict[str, str] = response.json(strict=False) - try: - json_resp["log"] = str(json_resp["log"]) - except Exception: - logger.info(f"No Logs for Cluster - {cluster_id}") + request_url = f"{self._BIBIGRID_URL}info/{cluster_id}" + try: - json_resp["msg"] = str(json_resp["msg"]) - except Exception: - logger.info(f"No msg for Cluster - {cluster_id}") + response = requests.get( + url=request_url, + json=body, + headers=headers, + verify=self._PRODUCTION, + ) + response.raise_for_status() # Raise an exception for HTTP errors (4xx and 5xx) + json_resp = response.json(strict=False) + + # Convert log and msg keys to strings, handling the case where they might not exist + json_resp["log"] = str(json_resp.get("log", "")) + json_resp["msg"] = str(json_resp.get("msg", "")) - return json_resp + logger.info(f"Cluster {cluster_id} status: {json_resp}") + + return json_resp + + except requests.RequestException as e: + logger.exception("Error while getting Cluster status") + return {"error": str(e)} def get_cluster_info(self, cluster_id: str) -> ClusterInfo: logger.info(f"Get Cluster info from {cluster_id}") @@ -128,7 +133,7 @@ def is_bibigrid_available(self) -> bool: logger.error(f"Bibigrid returned status code {response.status_code}") return False - except requests.RequestException as e: + except requests.RequestException: logger.exception("Error while checking Bibigrid availability") return False diff --git a/simple_vm_client/bibigrid_connector/test_bibigrid_connector.py b/simple_vm_client/bibigrid_connector/test_bibigrid_connector.py new file mode 100644 index 0000000..d71dba5 --- /dev/null +++ b/simple_vm_client/bibigrid_connector/test_bibigrid_connector.py @@ -0,0 +1,364 @@ +import os +import tempfile +import unittest +from unittest.mock import MagicMock, Mock, patch + +import requests + +from simple_vm_client.bibigrid_connector.bibigrid_connector import BibigridConnector +from 
simple_vm_client.ttypes import ClusterInfo, ClusterInstance + +HOST = "example.com" +PORT = 8080 +HTTPS = True +MODES = ["mode1", "mode2"] +MASTER_WITH_PUBLIC_IP = False +LOCAL_DNS_LOOKUP = False +NETWORK = "my_network" +SUB_NETWORK = "my_sub_network" +PRODUCTION = True +DEFAULT_CLUSTER_INFO = ClusterInfo( + group_id="fake_group_id", + network_id="fake_network_id", + public_ip="fake_public_ip", + subnet_id="fake_subnet_id", + user="fake_user", + inst_counter=42, + cluster_id="fake_cluster_id", + key_name="fake_key_name", +) +DEFAULT_MASTER_INSTANCE = ClusterInstance(image="master_image", type="master_flavor") +DEFAULT_WORKER_INSTANCES = [ + ClusterInstance(image="worker_flavor", count=3, type="worker_flavor") +] + + +class TestBibigridConnector(unittest.TestCase): + @patch( + "simple_vm_client.bibigrid_connector.bibigrid_connector.BibigridConnector.is_bibigrid_available" + ) + def setUp(self, mock_is_bibigrid_available): + self.fake_config = f""" + bibigrid: + host: {HOST} + port: {PORT} + https: {HTTPS} + modes: {MODES} + use_master_with_public_ip: {MASTER_WITH_PUBLIC_IP} + localDnsLookup: {LOCAL_DNS_LOOKUP} + ansibleGalaxyRoles: + - role1 + - role2 + + openstack: + network: {NETWORK} + sub_network: {SUB_NETWORK} + + production: {PRODUCTION} + """ + self.fake_config_file = tempfile.NamedTemporaryFile( + mode="w+", suffix=".yml", delete=False + ) + self.fake_config_file.write(self.fake_config) + self.fake_config_file.close() + mock_is_bibigrid_available.return_value = True + self.connector = BibigridConnector(config_file=self.fake_config_file.name) + + def tearDown(self): + # Clean up: Remove the temporary file + os.remove(self.fake_config_file.name) + + @patch( + "simple_vm_client.bibigrid_connector.bibigrid_connector.BibigridConnector.is_bibigrid_available" + ) + def test_load_config_yml(self, mock_is_bibigrid_available): + # Instantiate BibigridConnector with the fake config + self.connector.load_config_yml(self.fake_config_file.name) + 
self.assertEqual(self.connector._BIBIGRID_HOST, HOST) + self.assertEqual(self.connector._BIBIGRID_PORT, PORT) + self.assertEqual(self.connector._BIBIGRID_USE_HTTPS, HTTPS) + self.assertEqual(self.connector._BIBIGRID_MODES, MODES) + self.assertEqual( + self.connector._BIBIGRID_USE_MASTER_WITH_PUBLIC_IP, MASTER_WITH_PUBLIC_IP + ) + self.assertEqual(self.connector._BIBIGRID_LOCAL_DNS_LOOKUP, LOCAL_DNS_LOOKUP) + self.assertEqual(self.connector._BIBIGRID_ANSIBLE_ROLES, ["role1", "role2"]) + self.assertEqual(self.connector._NETWORK, NETWORK) + self.assertEqual(self.connector._SUB_NETWORK, SUB_NETWORK) + self.assertEqual(self.connector._PRODUCTION, PRODUCTION) + + # Check the generated URLs + self.assertEqual( + self.connector._BIBIGRID_URL, "https://example.com:8080/bibigrid/" + ) + self.assertEqual(self.connector._BIBIGRID_EP, "https://example.com:8080") + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_is_bibigrid_available_when_url_not_set(self, mock_logger_info): + # Arrange + self.connector._BIBIGRID_EP = "" + + # Act + result = self.connector.is_bibigrid_available() + + # Assert + mock_logger_info.assert_any_call("Checking if Bibigrid is available") + mock_logger_info.assert_any_call("Bibigrid Url is not set") + self.assertFalse(result) + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.get") + def test_is_bibigrid_available_when_request_succeeds( + self, mock_get, mock_logger_info + ): + # Arrange + mock_get.return_value = Mock(status_code=200) + + # Act + result = self.connector.is_bibigrid_available() + mock_logger_info.assert_any_call("Checking if Bibigrid is available") + + # Assert + self.assertTrue(result) + mock_get.assert_called_once_with(f"{self.connector._BIBIGRID_EP}/server/health") + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + 
@patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.error") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.get") + def test_is_bibigrid_available_when_request_wrong_status_code( + self, mock_get, mock_logger_error, mock_logger_info + ): + # Arrange + mock_get.return_value = Mock(status_code=500) + + # Act + result = self.connector.is_bibigrid_available() + mock_logger_info.assert_any_call("Checking if Bibigrid is available") + + # Assert + self.assertFalse(result) + mock_get.assert_called_once_with(f"{self.connector._BIBIGRID_EP}/server/health") + mock_logger_error.assert_called_once_with("Bibigrid returned status code 500") + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.error") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.get") + def test_is_bibigrid_available_when_request_exception( + self, mock_get, mock_logger_error, mock_logger_info + ): + # Arrange + mock_get.side_effect = requests.RequestException("Could not connect") + + # Act + result = self.connector.is_bibigrid_available() + mock_logger_info.assert_any_call("Checking if Bibigrid is available") + + # Assert + self.assertFalse(result) + mock_get.assert_called_once_with(f"{self.connector._BIBIGRID_EP}/server/health") + mock_logger_error.assert_called_once_with( + "Error while checking Bibigrid availability", exc_info=True + ) + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.delete") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_terminate_cluster(self, mock_logger_info, mock_delete): + # Arrange + cluster_id = "fake_cluster_id" + + expected_url = f"{self.connector._BIBIGRID_URL}terminate/{cluster_id}" + expected_headers = {"content-Type": "application/json"} + expected_body = {"mode": "openstack"} + expected_response = {"fake_key": "fake_value"} + + 
mock_delete.return_value = MagicMock(json=lambda: expected_response) + + # Act + result = self.connector.terminate_cluster(cluster_id) + + # Assert + mock_delete.assert_called_once_with( + url=expected_url, + json=expected_body, + headers=expected_headers, + verify=self.connector._PRODUCTION, + ) + mock_logger_info.assert_any_call(f"Terminate cluster: {cluster_id}") + mock_logger_info.assert_any_call(expected_response) + self.assertEqual(result, expected_response) + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.post") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_start_cluster(self, mock_logger_info, mock_post): + public_key = "fake_public_key" + + user = "fake_user" + + # Mock the response from the requests.post call + mock_post.return_value.json.return_value = {"fake": "response"} + + # Call the method to test + result = self.connector.start_cluster( + public_key=public_key, + master_instance=DEFAULT_MASTER_INSTANCE, + worker_instances=DEFAULT_WORKER_INSTANCES, + user=user, + ) + wI = [] + for wk in DEFAULT_WORKER_INSTANCES: + wI.append(wk.__dict__) + body = { + "mode": "openstack", + "subnet": self.connector._SUB_NETWORK, + "sshPublicKeys": [public_key], + "user": user, + "sshUser": "ubuntu", + "masterInstance": DEFAULT_MASTER_INSTANCE.__dict__, + "workerInstances": wI, + "useMasterWithPublicIp": self.connector._BIBIGRID_USE_MASTER_WITH_PUBLIC_IP, + "ansibleGalaxyRoles": self.connector._BIBIGRID_ANSIBLE_ROLES, + "localDNSLookup": self.connector._BIBIGRID_LOCAL_DNS_LOOKUP, + } + for mode in self.connector._BIBIGRID_MODES: + body.update({mode: True}) + + # Assertions + mock_post.assert_called_once_with( + url=self.connector._BIBIGRID_URL + "create", + json=body, + headers={"content-Type": "application/json"}, + verify=self.connector._PRODUCTION, + ) + args, kwargs = mock_post.call_args + self.assertEqual(kwargs["json"]["sshPublicKeys"], [public_key]) + self.assertEqual(kwargs["json"]["user"], 
user) + self.assertIn("masterInstance", kwargs["json"]) + self.assertIn("workerInstances", kwargs["json"]) + self.assertIn("useMasterWithPublicIp", kwargs["json"]) + self.assertIn("ansibleGalaxyRoles", kwargs["json"]) + self.assertIn("localDNSLookup", kwargs["json"]) + self.assertIn("openstack", kwargs["json"]["mode"]) + + self.assertEqual(result, {"fake": "response"}) + + for wk in DEFAULT_WORKER_INSTANCES: + mock_logger_info.assert_any_call(wk) + mock_logger_info.assert_any_call({"fake": "response"}) + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.get") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_get_clusters_info(self, mock_logger_info, mock_get): + # Mock the response from the requests.get call + mock_response = MagicMock() + mock_response.json.return_value = {"info": [{"cluster-id": "fake_cluster_id"}]} + mock_get.return_value = mock_response + + # Call the method to test + result = self.connector.get_clusters_info() + headers = {"content-Type": "application/json"} + body = {"mode": "openstack"} + + mock_get.assert_called_once_with( + url=self.connector._BIBIGRID_URL + "list", + json=body, + headers=headers, + verify=self.connector._PRODUCTION, + ) + + # Assertions + + self.assertEqual(result, [{"cluster-id": "fake_cluster_id"}]) + mock_logger_info.assert_called_once_with("Get clusters info") + + @patch.object(BibigridConnector, "get_clusters_info") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_get_cluster_info_none(self, mock_logger_info, mock_get_clusters_info): + mock_get_clusters_info.return_value = [] + result = self.connector.get_cluster_info("fake_cluster_id") + mock_logger_info.assert_any_call("Get Cluster info from fake_cluster_id") + self.assertIsNone(result) + + @patch.object(BibigridConnector, "get_clusters_info") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_get_cluster_info(self, 
mock_logger_info, mock_get_clusters_info): + # Mock the response from get_clusters_info + mock_get_clusters_info.return_value = [ + { + "cluster-id": "fake_cluster_id", + "group-id": "fake_group_id", + "network-id": "fake_network_id", + "public-ip": "fake_public_ip", + "subnet-id": "fake_subnet_id", + "user": "fake_user", + "# inst": 1, + "key name": "fake_key_name", + } + ] + + # Call the method to test + result = self.connector.get_cluster_info("fake_cluster_id") + + # Assertions + mock_get_clusters_info.assert_called_once() + self.assertEqual( + result, + ClusterInfo( + group_id="fake_group_id", + network_id="fake_network_id", + public_ip="fake_public_ip", + subnet_id="fake_subnet_id", + user="fake_user", + inst_counter=1, + cluster_id="fake_cluster_id", + key_name="fake_key_name", + ), + ) + mock_logger_info.assert_any_call("Get Cluster info from fake_cluster_id") + mock_logger_info.assert_any_call(f"Cluster fake_cluster_id info: {result} ") + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.get") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + def test_get_cluster_status(self, mock_logger_info, mock_requests_get): + # Arrange + cluster_id = "123" + + # Mock the response from requests.get + response_data = {"log": "Some log", "msg": "Some message"} + mock_response = MagicMock() + mock_response.json.return_value = response_data + mock_response.raise_for_status.return_value = None + mock_requests_get.return_value = mock_response + + # Act + result = self.connector.get_cluster_status(cluster_id) + + # Assert + mock_requests_get.assert_called_once_with( + url=f"{self.connector._BIBIGRID_URL}info/{cluster_id}", + json={"mode": "openstack"}, + headers={"content-Type": "application/json"}, + verify=self.connector._PRODUCTION, + ) + mock_response.json.assert_called_once() + mock_response.raise_for_status.assert_called_once() + mock_logger_info.assert_called_with( + f"Cluster {cluster_id} status: {response_data}" 
+ ) + self.assertEqual(result, response_data) + + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.requests.get") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.info") + @patch("simple_vm_client.bibigrid_connector.bibigrid_connector.logger.exception") + def test_get_cluster_status_with_exception( + self, mock_logger_exception, mock_logger_info, mock_requests_get + ): + # Arrange + cluster_id = "123" + mock_requests_get.side_effect = requests.RequestException("Could not connect") + # Act + result = self.connector.get_cluster_status(cluster_id) + + self.assertEqual(result, {"error": "Could not connect"}) + mock_logger_exception.assert_called_once_with( + "Error while getting Cluster status" + ) From d4809c32ae1bb8d0f19af85e4a2e04113f88d58c Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 3 Jan 2024 11:26:08 +0100 Subject: [PATCH 29/39] added more tests openstack --- .../openstack_connector.py | 157 +++--- simple_vm_client/test_openstack_connector.py | 461 ++++++++++++++++++ 2 files changed, 537 insertions(+), 81 deletions(-) diff --git a/simple_vm_client/openstack_connector/openstack_connector.py b/simple_vm_client/openstack_connector/openstack_connector.py index 04c793c..67d2690 100644 --- a/simple_vm_client/openstack_connector/openstack_connector.py +++ b/simple_vm_client/openstack_connector/openstack_connector.py @@ -1218,46 +1218,22 @@ def start_server( network: Network = self.get_network() key_name = f"{servername}_{metadata['project_name']}" logger.info(f"Key name {key_name}") - security_groups = self._get_default_security_groups() - if research_environment_metadata: - security_groups.append( - self.get_or_create_research_environment_security_group( - resenv_metadata=research_environment_metadata - ) - ) project_name = metadata.get("project_name") project_id = metadata.get("project_id") - if project_name and project_id: - security_groups.append( - self.get_or_create_project_security_group( - project_name=project_name, 
project_id=project_id - ) - ) - if additional_security_group_ids: - for security_id in additional_security_group_ids: - sec = self.openstack_connection.get_security_group( - name_or_id=security_id - ) - if sec: - security_groups.append(sec["id"]) + security_groups = self._get_security_groups_starting_machine( + additional_security_group_ids=additional_security_group_ids, + project_name=project_name, + project_id=project_id, + research_environment_metadata=research_environment_metadata, + ) + public_key = urllib.parse.unquote(public_key) self.import_keypair(key_name, public_key) - volume_ids = [] - volumes = [] - if volume_ids_path_new: - volume_ids.extend([vol["openstack_id"] for vol in volume_ids_path_new]) - if volume_ids_path_attach: - volume_ids.extend( - [vol["openstack_id"] for vol in volume_ids_path_attach] - ) - logger.info(f"volume ids {volume_ids}") - for volume_id in volume_ids: - try: - volumes.append(self.get_volume(name_or_id=volume_id)) - except VolumeNotFoundException: - logger.error( - f"Could not find volume: {volume_id} - attaching to server {servername} won't work!" 
- ) + volumes = self._get_volumes_machines_start( + volume_ids_path_new=volume_ids_path_new, + volume_ids_path_attach=volume_ids_path_attach, + ) + init_script = self.create_userdata( volume_ids_path_new=volume_ids_path_new, volume_ids_path_attach=volume_ids_path_attach, @@ -1288,20 +1264,29 @@ def start_server( logger.exception(f"Start Server {servername} error:{e}") raise DefaultException(message=str(e)) - def start_server_with_playbook( + def _get_volumes_machines_start( self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - research_environment_metadata: ResearchEnvironmentMetadata, - volume_ids_path_new: list[dict[str, str]] = None, # type: ignore - volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore - additional_keys: list[str] = None, # type: ignore - additional_security_group_ids=None, # type: ignore - ) -> tuple[str, str]: - logger.info(f"Start Server {servername}") - + volume_ids_path_new: list[dict[str, str]] = None, + volume_ids_path_attach: list[dict[str, str]] = None, + ) -> list[Volume]: + volume_ids = [] + volumes = [] + if volume_ids_path_new: + volume_ids.extend([vol["openstack_id"] for vol in volume_ids_path_new]) + if volume_ids_path_attach: + volume_ids.extend([vol["openstack_id"] for vol in volume_ids_path_attach]) + logger.info(f"volume ids {volume_ids}") + for volume_id in volume_ids: + volumes.append(self.openstack_connection.get_volume(name_or_id=volume_id)) + return volumes + + def _get_security_groups_starting_machine( + self, + additional_security_group_ids: Union[list[str], None] = None, + project_name: Union[str, None] = None, + project_id: Union[str, None] = None, + research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, + ) -> list[str]: security_groups = self._get_default_security_groups() if research_environment_metadata: security_groups.append( @@ -1309,8 +1294,6 @@ def start_server_with_playbook( resenv_metadata=research_environment_metadata ) ) - 
project_name = metadata.get("project_name") - project_id = metadata.get("project_id") if project_name and project_id: security_groups.append( self.get_or_create_project_security_group( @@ -1324,7 +1307,31 @@ def start_server_with_playbook( ) if sec: security_groups.append(sec["id"]) - key_name = "" + return security_groups + + def start_server_with_playbook( + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + research_environment_metadata: ResearchEnvironmentMetadata, + volume_ids_path_new: list[dict[str, str]] = None, # type: ignore + volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore + additional_keys: list[str] = None, # type: ignore + additional_security_group_ids=None, # type: ignore + ) -> tuple[str, str]: + logger.info(f"Start Server {servername}") + + project_name = metadata.get("project_name") + project_id = metadata.get("project_id") + security_groups = self._get_security_groups_starting_machine( + additional_security_group_ids=additional_security_group_ids, + project_name=project_name, + project_id=project_id, + research_environment_metadata=research_environment_metadata, + ) + key_name = None try: image: Image = self.get_image(name_or_id=image_name) flavor: Flavor = self.get_flavor(name_or_id=flavor_name) @@ -1333,22 +1340,15 @@ def start_server_with_playbook( key_creation: Keypair = self.openstack_connection.create_keypair( name=servername ) + key_name = key_creation.name private_key = key_creation.private_key - volume_ids = [] - volumes = [] - if volume_ids_path_new: - volume_ids.extend([vol["openstack_id"] for vol in volume_ids_path_new]) - if volume_ids_path_attach: - volume_ids.extend( - [vol["openstack_id"] for vol in volume_ids_path_attach] - ) - logger.info(f"volume ids {volume_ids}") - for volume_id in volume_ids: - volumes.append( - self.openstack_connection.get_volume(name_or_id=volume_id) - ) + volumes = self._get_volumes_machines_start( + volume_ids_path_new=volume_ids_path_new, + 
volume_ids_path_attach=volume_ids_path_attach, + ) + init_script = self.create_userdata( volume_ids_path_new=volume_ids_path_new, volume_ids_path_attach=volume_ids_path_attach, @@ -1375,7 +1375,7 @@ def start_server_with_playbook( if key_name: self.delete_keypair(key_name=key_name) - logger.exception(f"Start Server {servername} error:{e}") + logger.exception(f"Start Server {servername} error") raise DefaultException(message=str(e)) def create_deactivate_update_script(self) -> str: @@ -1391,27 +1391,23 @@ def create_deactivate_update_script(self) -> str: def add_udp_security_group(self, server_id): logger.info(f"Setting up UDP security group for {server_id}") server = self.get_server(openstack_id=server_id) - sec = self.openstack_connection.get_security_group( - name_or_id=server.name + "_udp" - ) - if sec: - logger.info( - f"UDP Security group with name {server.name + '_udp'} already exists." - ) + sec_name = server.name + "_udp" + existing_sec = self.openstack_connection.get_security_group(name_or_id=sec_name) + if existing_sec: + logger.info(f"UDP Security group with name {sec_name} already exists.") server_security_groups = ( self.openstack_connection.list_server_security_groups(server) ) + for sg in server_security_groups: - if sg["name"] == server.name + "_udp": + if sg["name"] == sec_name: logger.info( - "UDP Security group with name {} already added to server.".format( - server.name + "_udp" - ) + f"UDP Security group with name {sec_name} already added to server." 
) return self.openstack_connection.compute.add_security_group_to_server( - server=server_id, security_group=sec + server=server_id, security_group=existing_sec ) return @@ -1419,14 +1415,13 @@ def add_udp_security_group(self, server_id): udp_port = vm_ports["udp"] security_group = self.create_security_group( - name=server.name + "_udp", + name=sec_name, udp_port=int(udp_port), udp=True, ssh=False, description="UDP", ) - logger.info(security_group) - logger.info(f"Add security group {security_group.id} to server {server_id} ") + logger.info(f"Add security group {security_group.id} to server {server_id}") self.openstack_connection.compute.add_security_group_to_server( server=server_id, security_group=security_group ) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 604f62f..89efed7 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -14,10 +14,12 @@ from openstack.exceptions import ConflictException, ResourceFailure, ResourceNotFound from openstack.image.v2 import image from openstack.image.v2 import image as image_module +from openstack.network.v2 import security_group from openstack.network.v2.network import Network from openstack.test import fakes from oslo_utils import encodeutils + from .openstack_connector.openstack_connector import OpenStackConnector from .ttypes import ( DefaultException, @@ -112,12 +114,26 @@ def setUp(self): self.mock_openstack_connection ) self.openstack_connector.DEFAULT_SECURITY_GROUPS = DEFAULT_SECURITY_GROUPS + self.openstack_connector.DEACTIVATE_UPGRADES_SCRIPT = ( + self.openstack_connector.create_deactivate_update_script() + ) + self.openstack_connector.GATEWAY_SECURITY_GROUP_ID = ( + "dedasdasdasdadew1231231" + ) + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + + # Call the load_config_yml method with the temporary file path + 
self.openstack_connector.load_config_yml(temp_file.name) + + # Assert that the configuration attributes are set correctly def init_openstack_connector(self): with patch.object(OpenStackConnector, "__init__", lambda x, y, z: None): openstack_connector = OpenStackConnector(None, None) openstack_connector.openstack_connection = self.mock_openstack_connection openstack_connector.DEFAULT_SECURITY_GROUPS = DEFAULT_SECURITY_GROUPS + return openstack_connector def test_load_config_yml(self): @@ -1176,6 +1192,451 @@ def test_get_public_images(self, mock_logger_info): self.assertEqual(result_images, images[:2]) # Exclude the private image mock_logger_info.assert_any_call("Get public images") + @patch.object(OpenStackConnector, "get_image") + @patch.object(OpenStackConnector, "get_flavor") + @patch.object(OpenStackConnector, "get_network") + @patch.object(OpenStackConnector, "create_server") + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + def test_add_cluster_machine( + self, + mock_logger_info, + mock_create_server, + mock_get_network, + mock_get_flavor, + mock_get_image, + ): + # Arrange + cluster_id = "123" + cluster_user = "user1" + cluster_group_id = ["group1", "group2"] + image_name = "image1" + flavor_name = "flavor1" + name = "machine1" + key_name = "key1" + batch_idx = 1 + worker_idx = 2 + + # Mock responses from get_image, get_flavor, and get_network + mock_image = MagicMock() + mock_get_image.return_value = mock_image + + mock_flavor = MagicMock() + mock_get_flavor.return_value = mock_flavor + + mock_network = MagicMock() + mock_get_network.return_value = mock_network + + # Mock response from create_server + mock_server = {"id": "server123"} + mock_create_server.return_value = mock_server + + # Act + result = self.openstack_connector.add_cluster_machine( + cluster_id, + cluster_user, + cluster_group_id, + image_name, + flavor_name, + name, + key_name, + batch_idx, + worker_idx, + ) + + # Assert + 
mock_get_image.assert_called_once_with( + name_or_id=image_name, replace_inactive=True + ) + mock_get_flavor.assert_called_once_with(name_or_id=flavor_name) + mock_get_network.assert_called_once() + mock_create_server.assert_called_once_with( + name=name, + image_id=mock_image.id, + flavor_id=mock_flavor.id, + network_id=mock_network.id, + userdata=self.openstack_connector.DEACTIVATE_UPGRADES_SCRIPT, + key_name=key_name, + metadata={ + "bibigrid-id": cluster_id, + "user": cluster_user, + "worker-batch": str(batch_idx), + "name": name, + "worker-index": str(worker_idx), + }, + security_groups=cluster_group_id, + ) + mock_logger_info.assert_any_call(f"Add machine to {cluster_id}") + + mock_logger_info.assert_any_call(f"Created cluster machine:{mock_server['id']}") + self.assertEqual(result, mock_server["id"]) + + def test_add_udp_security_group_existing_group(self): + # Test when an existing UDP security group is found + server = fakes.generate_fake_resource(Server) + sec_group = fakes.generate_fake_resource(security_group.SecurityGroup) + sec_group.name = server.name + "_udp" + # Mocking an existing security group + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + sec_group + ) + # Mocking server security groups + self.openstack_connector.openstack_connection.list_server_security_groups.return_value = [ + sec_group + ] + + # Call the method + self.openstack_connector.add_udp_security_group(server.id) + + # Assertions + self.openstack_connector.openstack_connection.compute.add_security_group_to_server.assert_called_once_with( + server=server.id, security_group=sec_group + ) + + @patch.object(OpenStackConnector, "get_vm_ports") + @patch.object(OpenStackConnector, "create_security_group") + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + def test_add_udp_security_group_new_group( + self, mock_logger_info, mock_create_security_group, mock_get_vm_ports + ): + # Test when a new UDP security group 
needs to be created + + server = fakes.generate_fake_resource(Server) + sec_group = fakes.generate_fake_resource(security_group.SecurityGroup) + sec_group.name = server.name + "_udp" + udp_port = 30001 + + # Mocking a non-existing security group + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + None + ) + self.openstack_connector.openstack_connection.get_server_by_id.return_value = ( + server + ) + # Mocking VM ports + mock_get_vm_ports.return_value = {"udp": udp_port} + mock_create_security_group.return_value = sec_group + self.openstack_connector.get_vm_ports = mock_get_vm_ports + self.openstack_connector.create_security_group = mock_create_security_group + + # Call the method + self.openstack_connector.add_udp_security_group(server.id) + self.openstack_connector.openstack_connection.get_server_by_id.assert_called_once_with( + id=server.id + ) + + # Assertions + mock_create_security_group.assert_called_once_with( + name=sec_group.name, + udp_port=udp_port, + udp=True, + ssh=False, + description="UDP", + ) + self.openstack_connector.openstack_connection.compute.add_security_group_to_server.assert_called_once_with( + server=server.id, security_group=sec_group + ) + mock_logger_info.assert_any_call( + f"Setting up UDP security group for {server.id}" + ) + mock_logger_info.assert_any_call( + (f"Add security group {sec_group.id} to server {server.id}") + ) + + @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + def test_add_udp_security_group_already_added(self, mock_logger_info): + # Test when an existing UDP security group is found + server = fakes.generate_fake_resource(Server) + + sec_group = fakes.generate_fake_resource(security_group.SecurityGroup) + sec_group.name = server.name + "_udp" + + # Mocking an existing security group + self.openstack_connector.openstack_connection.get_server_by_id.return_value = ( + server + ) + 
self.openstack_connector.openstack_connection.get_security_group.return_value = ( + sec_group + ) + # Mocking server security groups + self.openstack_connector.openstack_connection.list_server_security_groups.return_value = [ + sec_group + ] + self.openstack_connector.add_udp_security_group(server.id) + self.openstack_connector.openstack_connection.add_security_group_to_server.assert_not_called() + mock_logger_info.assert_any_call( + f"Setting up UDP security group for {server.id}" + ) + mock_logger_info.assert_any_call( + f"UDP Security group with name {sec_group.name} already exists." + ) + + mock_logger_info.assert_any_call( + f"UDP Security group with name {sec_group.name} already added to server." + ) + + @patch.object(OpenStackConnector, "_get_security_groups_starting_machine") + @patch.object(OpenStackConnector, "_get_volumes_machines_start") + @patch.object(OpenStackConnector, "create_userdata") + @patch.object(OpenStackConnector, "delete_keypair") + def test_start_server_with_playbook( + self, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, + ): + server = fakes.generate_fake_resource(Server) + server_keypair = fakes.generate_fake_resource(keypair.Keypair) + fake_image = fakes.generate_fake_resource(image.Image) + fake_image.status = "active" + fake_flavor = fakes.generate_fake_resource(flavor.Flavor) + fake_network = fakes.generate_fake_resource(Network) + + # Set up mocks + self.openstack_connector.openstack_connection.create_server.return_value = ( + server + ) + self.openstack_connector.openstack_connection.create_keypair.return_value = ( + server_keypair + ) + mock_get_security_groups_starting_machine.return_value = ["sg1", "sg2"] + self.openstack_connector.openstack_connection.get_image.return_value = ( + fake_image + ) + self.openstack_connector.openstack_connection.get_flavor.return_value = ( + fake_flavor + ) + 
self.openstack_connector.openstack_connection.network.find_network.return_value = ( + fake_network + ) + mock_get_volumes.return_value = ["volume1", "volume2"] + mock_create_userdata.return_value = "userdata" + + # Set necessary input parameters + flavor_name = fake_flavor.name + image_name = fake_image.name + servername = server.name + metadata = {"project_name": "mock_project", "project_id": "mock_project_id"} + research_environment_metadata = MagicMock() + volume_ids_path_new = [ + {"openstack_id": "volume_id1"}, + {"openstack_id": "volume_id2"}, + ] + volume_ids_path_attach = [{"openstack_id": "volume_id3"}] + additional_keys = ["key1", "key2"] + additional_security_group_ids = ["sg3", "sg4"] + + # Call the method + result = self.openstack_connector.start_server_with_playbook( + flavor_name, + image_name, + servername, + metadata, + research_environment_metadata, + volume_ids_path_new, + volume_ids_path_attach, + additional_keys, + additional_security_group_ids, + ) + + # Assertions + self.openstack_connector.openstack_connection.create_server.assert_called_once_with( + name=server.name, + image=fake_image.id, + flavor=fake_flavor.id, + network=[fake_network.id], + key_name=servername, + meta=metadata, + volumes=["volume1", "volume2"], + userdata="userdata", + security_groups=["sg1", "sg2"], + ) + + mock_create_userdata.assert_called_once_with( + volume_ids_path_new=volume_ids_path_new, + volume_ids_path_attach=volume_ids_path_attach, + additional_keys=additional_keys, + ) + + mock_get_security_groups_starting_machine.assert_called_once_with( + additional_security_group_ids=additional_security_group_ids, + project_name="mock_project", + project_id="mock_project_id", + research_environment_metadata=research_environment_metadata, + ) + + self.openstack_connector.openstack_connection.create_keypair.assert_called_once_with( + name=servername + ) + + mock_get_volumes.assert_called_once_with( + volume_ids_path_new=volume_ids_path_new, + 
volume_ids_path_attach=volume_ids_path_attach, + ) + + mock_delete_keypair.assert_called_once_with(key_name=server_keypair.name) + + # Check the result + self.assertEqual(result, (server.id, server_keypair.private_key)) + + @patch.object(OpenStackConnector, "_get_security_groups_starting_machine") + @patch.object(OpenStackConnector, "_get_volumes_machines_start") + @patch.object(OpenStackConnector, "create_userdata") + @patch.object(OpenStackConnector, "delete_keypair") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_start_server_with_playbook_exception( + self, + mock_logger_exception, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, + ): + server = fakes.generate_fake_resource(Server) + server_keypair = fakes.generate_fake_resource(keypair.Keypair) + fake_image = fakes.generate_fake_resource(image.Image) + fake_image.status = "active" + fake_flavor = fakes.generate_fake_resource(flavor.Flavor) + fake_network = fakes.generate_fake_resource(Network) + + # Set up mocks + self.openstack_connector.openstack_connection.create_server.return_value = ( + server + ) + self.openstack_connector.openstack_connection.create_keypair.return_value = ( + server_keypair + ) + mock_get_security_groups_starting_machine.return_value = ["sg1", "sg2"] + self.openstack_connector.openstack_connection.get_image.return_value = ( + fake_image + ) + self.openstack_connector.openstack_connection.get_flavor.return_value = ( + fake_flavor + ) + self.openstack_connector.openstack_connection.network.find_network.return_value = ( + fake_network + ) + mock_get_volumes.side_effect = OpenStackCloudException("Unit Test Error") + flavor_name = fake_flavor.name + image_name = fake_image.name + servername = server.name + metadata = {"project_name": "mock_project", "project_id": "mock_project_id"} + research_environment_metadata = MagicMock() + volume_ids_path_new = [ + {"openstack_id": 
"volume_id1"}, + {"openstack_id": "volume_id2"}, + ] + volume_ids_path_attach = [{"openstack_id": "volume_id3"}] + additional_keys = ["key1", "key2"] + additional_security_group_ids = ["sg3", "sg4"] + + with self.assertRaises(DefaultException): + self.openstack_connector.start_server_with_playbook( + flavor_name, + image_name, + servername, + metadata, + research_environment_metadata, + volume_ids_path_new, + volume_ids_path_attach, + additional_keys, + additional_security_group_ids, + ) + mock_delete_keypair.assert_called_once_with(key_name=server_keypair.name) + mock_logger_exception.assert_called_once_with( + (f"Start Server {servername} error") + ) + + @patch.object(OpenStackConnector, "_get_default_security_groups") + @patch.object( + OpenStackConnector, "get_or_create_research_environment_security_group" + ) + @patch.object(OpenStackConnector, "get_or_create_project_security_group") + def test_get_security_groups_starting_machine( + self, + mock_get_project_sg, + mock_get_research_env_sg, + mock_get_default_security_groups, + ): + # Set up mocks + fake_default_security_group = fakes.generate_fake_resource( + security_group.SecurityGroup + ) + fake_project_security_group = fakes.generate_fake_resource( + security_group.SecurityGroup + ) + mock_get_default_security_groups.return_value = [fake_default_security_group.id] + mock_get_research_env_sg.return_value = "research_env_sg" + mock_get_project_sg.return_value = fake_project_security_group.id + self.openstack_connector.openstack_connection.get_security_group.side_effect = [ + {"id": "additional_sg1"}, + {"id": "additional_sg2"}, + ] + # Set necessary input parameters + additional_security_group_ids = ["additional_sg1", "additional_sg2"] + project_name = "mock_project" + project_id = "mock_project_id" + research_environment_metadata = MagicMock() + + # Call the method + result = self.openstack_connector._get_security_groups_starting_machine( + additional_security_group_ids, + project_name, + project_id, + 
research_environment_metadata, + ) + + # Assertions + mock_get_default_security_groups.assert_called_once() + + mock_get_research_env_sg.assert_called_once_with( + resenv_metadata=research_environment_metadata + ) + mock_get_project_sg.assert_called_once_with( + project_name=project_name, project_id=project_id + ) + + self.openstack_connector.openstack_connection.get_security_group.assert_has_calls( + [call(name_or_id="additional_sg1"), call(name_or_id="additional_sg2")] + ) + # Check the result + expected_result = [ + "research_env_sg", + fake_default_security_group.id, + fake_project_security_group.id, + "additional_sg1", + "additional_sg2", + ] + self.assertCountEqual(result, expected_result) + + def test_get_volumes_machines_start(self): + fake_vol_1 = fakes.generate_fake_resource(volume.Volume) + fake_vol_2 = fakes.generate_fake_resource(volume.Volume) + + # Set up mock + self.openstack_connector.openstack_connection.get_volume.side_effect = [ + fake_vol_1, + fake_vol_2, + ] + + # Set necessary input parameters + volume_ids_path_new = [{"openstack_id": fake_vol_1.id}] + volume_ids_path_attach = [{"openstack_id": fake_vol_2.id}] + + # Call the method + result = self.openstack_connector._get_volumes_machines_start( + volume_ids_path_new, volume_ids_path_attach + ) + + # Assertions + self.openstack_connector.openstack_connection.get_volume.assert_has_calls( + [call(name_or_id=fake_vol_1.id), call(name_or_id=fake_vol_2.id)] + ) + + # Check the result + expected_result = [fake_vol_1, fake_vol_2] + self.assertEqual(result, expected_result) + if __name__ == "__main__": unittest.main() From 3f904733388ffaf0c954256ec407f0026299d157 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 3 Jan 2024 16:02:53 +0100 Subject: [PATCH 30/39] added more tests openstack --- .../openstack_connector.py | 302 +++--- simple_vm_client/test_openstack_connector.py | 872 +++++++++++++++++- 2 files changed, 973 insertions(+), 201 deletions(-) diff --git 
a/simple_vm_client/openstack_connector/openstack_connector.py b/simple_vm_client/openstack_connector/openstack_connector.py index 67d2690..187479e 100644 --- a/simple_vm_client/openstack_connector/openstack_connector.py +++ b/simple_vm_client/openstack_connector/openstack_connector.py @@ -141,7 +141,7 @@ def load_env_config(self) -> None: sys.exit(1) self.USE_APPLICATION_CREDENTIALS = ( - os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true" + os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true" ) if self.USE_APPLICATION_CREDENTIALS: @@ -183,15 +183,15 @@ def load_env_config(self) -> None: self.PROJECT_DOMAIN_ID = os.environ["OS_PROJECT_DOMAIN_ID"] def create_server( - self, - name: str, - image_id: str, - flavor_id: str, - network_id: str, - userdata: str, - key_name: str, - metadata: dict[str, str], - security_groups: list[str], + self, + name: str, + image_id: str, + flavor_id: str, + network_id: str, + userdata: str, + key_name: str, + metadata: dict[str, str], + security_groups: list[str], ) -> Server: logger.info( f"Create Server:\n\tname: {name}\n\timage_id:{image_id}\n\tflavor_id:{flavor_id}\n\tmetadata:{metadata}" @@ -233,7 +233,7 @@ def delete_volume(self, volume_id: str) -> None: raise DefaultException(message=e.message) def create_volume_snapshot( - self, volume_id: str, name: str, description: str + self, volume_id: str, name: str, description: str ) -> str: try: logger.info(f"Create Snapshot for Volume {volume_id}") @@ -276,7 +276,7 @@ def delete_volume_snapshot(self, snapshot_id: str) -> None: raise DefaultException(message=e.message) def create_volume_by_source_volume( - self, volume_name: str, metadata: dict[str, str], source_volume_id: str + self, volume_name: str, metadata: dict[str, str], source_volume_id: str ) -> Volume: logger.info(f"Creating volume from source volume with id {source_volume_id}") try: @@ -292,7 +292,7 @@ def create_volume_by_source_volume( raise 
ResourceNotAvailableException(message=e.message) def create_volume_by_volume_snap( - self, volume_name: str, metadata: dict[str, str], volume_snap_id: str + self, volume_name: str, metadata: dict[str, str], volume_snap_id: str ) -> Volume: logger.info(f"Creating volume from volume snapshot with id {volume_snap_id}") try: @@ -330,7 +330,7 @@ def get_servers_by_ids(self, ids: list[str]) -> list[Server]: return servers def attach_volume_to_server( - self, openstack_id: str, volume_id: str + self, openstack_id: str, volume_id: str ) -> dict[str, str]: server = self.get_server(openstack_id=openstack_id) volume = self.get_volume(name_or_id=volume_id) @@ -371,7 +371,7 @@ def resize_volume(self, volume_id: str, size: int) -> None: raise DefaultException(message=str(e)) def create_volume( - self, volume_name: str, volume_storage: int, metadata: dict[str, str] + self, volume_name: str, volume_storage: int, metadata: dict[str, str] ) -> Volume: logger.info(f"Creating volume with {volume_storage} GB storage") try: @@ -488,9 +488,9 @@ def get_active_image_by_os_version(self, os_version: str, os_distro: str) -> Ima image_os_distro = metadata.get("os_distro", None) base_image_ref = metadata.get("base_image_ref", None) if ( - os_version == image_os_version - and image.status == "active" - and base_image_ref is None + os_version == image_os_version + and image.status == "active" + and base_image_ref is None ): if os_distro and os_distro == image_os_distro: return image @@ -502,11 +502,11 @@ def get_active_image_by_os_version(self, os_version: str, os_distro: str) -> Ima ) def get_image( - self, - name_or_id: str, - replace_inactive: bool = False, - ignore_not_active: bool = False, - ignore_not_found: bool = False, + self, + name_or_id: str, + replace_inactive: bool = False, + ignore_not_active: bool = False, + ignore_not_found: bool = False, ) -> Image: logger.info(f"Get Image {name_or_id}") @@ -530,12 +530,12 @@ def get_image( return image def create_snapshot( - self, - 
openstack_id: str, - name: str, - username: str, - base_tags: list[str], - description: str, + self, + openstack_id: str, + name: str, + username: str, + base_tags: list[str], + description: str, ) -> str: logger.info( f"Create Snapshot from Instance {openstack_id} with name {name} for {username}" @@ -647,9 +647,9 @@ def get_gateway_ip(self) -> dict[str, str]: return {"gateway_ip": self.GATEWAY_IP} def create_mount_init_script( - self, - new_volumes: list[dict[str, str]] = None, # type: ignore - attach_volumes: list[dict[str, str]] = None, # type: ignore + self, + new_volumes: list[dict[str, str]] = None, # type: ignore + attach_volumes: list[dict[str, str]] = None, # type: ignore ) -> str: logger.info(f"Create init script for volume ids:{new_volumes}") if not new_volumes and not attach_volumes: @@ -730,7 +730,7 @@ def delete_security_group_rule(self, openstack_id): ) def open_port_range_for_vm_in_project( - self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" + self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" ): server: Server = self.openstack_connection.get_server_by_id(id=openstack_id) if server is None: @@ -779,13 +779,13 @@ def open_port_range_for_vm_in_project( raise OpenStackConflictException(message=e.message) def create_security_group( - self, - name: str, - udp_port: int = None, # type: ignore - ssh: bool = True, - udp: bool = False, - description: str = "", - research_environment_metadata: ResearchEnvironmentMetadata = None, + self, + name: str, + udp_port: int = None, # type: ignore + ssh: bool = True, + udp: bool = False, + description: str = "", + research_environment_metadata: ResearchEnvironmentMetadata = None, ) -> SecurityGroup: logger.info(f"Create new security group {name}") sec: SecurityGroup = self.openstack_connection.get_security_group( @@ -843,6 +843,8 @@ def create_security_group( remote_group_id=self.GATEWAY_SECURITY_GROUP_ID, ) if research_environment_metadata: + logger.info(f"Add 
research env rule to security group {name}") + self.openstack_connection.network.create_security_group_rule( direction=research_environment_metadata.direction, protocol=research_environment_metadata.protocol, @@ -892,7 +894,7 @@ def is_security_group_in_use(self, security_group_id): return False def get_or_create_research_environment_security_group( - self, resenv_metadata: ResearchEnvironmentMetadata + self, resenv_metadata: ResearchEnvironmentMetadata ): if not resenv_metadata.needs_forc_support: return None @@ -913,7 +915,7 @@ def get_or_create_research_environment_security_group( ) new_security_group = self.openstack_connection.create_security_group( - name=resenv_metadata.securitygroup_name, description=resenv_metadata.name + name=resenv_metadata.securitygroup_name, description=resenv_metadata.description ) self.openstack_connection.network.create_security_group_rule( direction=resenv_metadata.direction, @@ -969,8 +971,7 @@ def get_or_create_project_security_group(self, project_name, project_id): def get_limits(self) -> dict[str, str]: logger.info("Get Limits") - limits = {} - limits.update(self.openstack_connection.get_compute_limits()) + limits = self.openstack_connection.get_compute_limits() limits.update(self.openstack_connection.get_volume_limits()["absolute"]) return { @@ -980,10 +981,10 @@ def get_limits(self) -> dict[str, str]: "current_used_cores": str(limits["total_cores_used"]), "current_used_vms": str(limits["total_instances_used"]), "current_used_ram": str(math.ceil(limits["total_ram_used"] / 1024)), - "volume_counter_limit": str(limits["maxTotalVolumes"]), - "volume_storage_limit": str(limits["maxTotalVolumeGigabytes"]), - "current_used_volumes": str(limits["totalVolumesUsed"]), - "current_used_volume_storage": str(limits["totalGigabytesUsed"]), + "volume_counter_limit": str(limits["max_total_volumes"]), + "volume_storage_limit": str(limits["max_total_volume_gigabytes"]), + "current_used_volumes": str(limits["total_volumes_used"]), + 
"current_used_volume_storage": str(limits["total_gigabytes_used"]), } def exist_server(self, name: str) -> bool: @@ -995,13 +996,7 @@ def exist_server(self, name: str) -> bool: def set_server_metadata(self, openstack_id: str, metadata) -> None: try: logger.info(f"Set Server Metadata: {openstack_id} --> {metadata}") - server: Server = self.openstack_connection.get_server_by_id(id=openstack_id) - if server is None: - logger.exception(f"Instance {openstack_id} not found") - raise ServerNotFoundException( - message=f"Instance {openstack_id} not found", - name_or_id=openstack_id, - ) + server: Server = self.get_server(openstack_id) self.openstack_connection.compute.set_server_metadata(server, metadata) except OpenStackCloudException as e: raise DefaultException( @@ -1019,17 +1014,7 @@ def get_server(self, openstack_id: str) -> Server: name_or_id=openstack_id, ) if server.vm_state == VmStates.ACTIVE.value: - fixed_ip = server.private_v4 - base_port = int(fixed_ip.split(".")[-1]) # noqa F841 - subnet_port = int(fixed_ip.split(".")[-2]) # noqa F841 - - x = sympy.symbols("x") - y = sympy.symbols("y") - ssh_port = int( - sympy.sympify(self.SSH_PORT_CALCULATION).evalf( - subs={x: base_port, y: subnet_port} - ) - ) + ssh_port, udp_port = self._calculate_vm_ports(server=server) if not self.netcat(host=self.GATEWAY_IP, port=ssh_port): server.task_state = VmTaskStates.CHECKING_SSH_CONNECTION.value @@ -1052,12 +1037,6 @@ def resume_server(self, openstack_id: str) -> None: logger.info(f"Resume Server {openstack_id}") try: server = self.get_server(openstack_id=openstack_id) - if server is None: - logger.exception(f"Instance {openstack_id} not found") - raise ServerNotFoundException( - message=f"Instance {openstack_id} not found", - name_or_id=openstack_id, - ) self.openstack_connection.compute.start_server(server) except ConflictException as e: @@ -1084,23 +1063,45 @@ def stop_server(self, openstack_id: str) -> None: logger.info(f"Stop Server {openstack_id}") server = 
self.get_server(openstack_id=openstack_id) try: - if server is None: - raise ServerNotFoundException( - message=f"Instance {openstack_id} not found", - name_or_id=openstack_id, - ) - self.openstack_connection.compute.stop_server(server) except ConflictException as e: logger.exception(f"Stop Server {openstack_id} failed!") raise OpenStackConflictException(message=e.message) + def _remove_security_groups_from_server(self, server: Server) -> None: + security_groups = server.security_groups + + if security_groups is not None: + for sg in security_groups: + sec = self.openstack_connection.get_security_group(name_or_id=sg["name"]) + logger.info(f"Remove security group {sec.id} from server {server.id}") + self.openstack_connection.compute.remove_security_group_from_server( + server=server, security_group=sec + ) + + if ( + sg["name"] != self.DEFAULT_SECURITY_GROUP_NAME + and ("bibigrid" not in sec.name or "master" not in server.name) + and not self.is_security_group_in_use(security_group_id=sec.id) + ): + logger.info(f"Delete security group {sec}") + + self.openstack_connection.delete_security_group(sec) + + def _validate_server_for_deletion(self, server: Server) -> None: + task_state = server.task_state + if task_state in [ + "image_snapshot", + "image_pending_upload", + "image_uploading", + ]: + raise ConflictException("task_state in image creating") + def delete_server(self, openstack_id: str) -> None: logger.info(f"Delete Server {openstack_id}") try: - server = self.get_server(openstack_id=openstack_id) - + server: Server = self.get_server(openstack_id=openstack_id) if not server: logger.error(f"Instance {openstack_id} not found") raise ServerNotFoundException( @@ -1108,29 +1109,8 @@ def delete_server(self, openstack_id: str) -> None: name_or_id=openstack_id, ) - task_state = server.get("task_state", None) - if task_state in [ - "image_snapshot", - "image_pending_upload", - "image_uploading", - ]: - raise ConflictException("task_state in image creating") - 
security_groups = server["security_groups"] - if security_groups is not None: - for sg in security_groups: - sec = self.openstack_connection.get_security_group( - name_or_id=sg["name"] - ) - logger.info(f"Delete security group {sec}") - self.openstack_connection.compute.remove_security_group_from_server( - server=server, security_group=sec - ) - if ( - sg["name"] != self.DEFAULT_SECURITY_GROUP_NAME - and ("bibigrid" not in sec.name or "master" not in server.name) - and not self.is_security_group_in_use(security_group_id=sec.id) - ): - self.openstack_connection.delete_security_group(sg) + self._validate_server_for_deletion(server=server) + self._remove_security_groups_from_server(server=server) self.openstack_connection.compute.delete_server(server.id, force=True) except ConflictException as e: @@ -1138,14 +1118,8 @@ def delete_server(self, openstack_id: str) -> None: raise OpenStackConflictException(message=e.message) - def get_vm_ports(self, openstack_id: str) -> dict[str, str]: - logger.info(f"Get IP and PORT for server {openstack_id}") - server = self.get_server(openstack_id=openstack_id) - if not server: - raise ServerNotFoundException( - message=f"Server {openstack_id} not found!", name_or_id=openstack_id - ) - fixed_ip = server["private_v4"] + def _calculate_vm_ports(self, server: Server): + fixed_ip = server.private_v4 base_port = int(fixed_ip.split(".")[-1]) # noqa F841 subnet_port = int(fixed_ip.split(".")[-2]) # noqa F841 @@ -1161,14 +1135,20 @@ def get_vm_ports(self, openstack_id: str) -> dict[str, str]: subs={x: base_port, y: subnet_port} ) ) + return ssh_port, udp_port + + def get_vm_ports(self, openstack_id: str) -> dict[str, str]: + logger.info(f"Get IP and PORT for server {openstack_id}") + server = self.get_server(openstack_id=openstack_id) + ssh_port, udp_port = self._calculate_vm_ports(server=server) return {"port": str(ssh_port), "udp": str(udp_port)} def create_userdata( - self, - volume_ids_path_new: list[dict[str, str]], - 
volume_ids_path_attach: list[dict[str, str]], - additional_keys: list[str], + self, + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_keys: list[str], ) -> str: unlock_ubuntu_user_script = "#!/bin/bash\npasswd -u ubuntu\n" unlock_ubuntu_user_script_encoded = encodeutils.safe_encode( @@ -1179,9 +1159,9 @@ def create_userdata( if additional_keys: add_key_script = self.create_add_keys_script(keys=additional_keys) init_script = ( - add_key_script - + encodeutils.safe_encode("\n".encode("utf-8")) - + init_script + add_key_script + + encodeutils.safe_encode("\n".encode("utf-8")) + + init_script ) if volume_ids_path_new or volume_ids_path_attach: mount_script = self.create_mount_init_script( @@ -1189,25 +1169,25 @@ def create_userdata( attach_volumes=volume_ids_path_attach, ) init_script = ( - init_script - + encodeutils.safe_encode("\n".encode("utf-8")) - + mount_script + init_script + + encodeutils.safe_encode("\n".encode("utf-8")) + + mount_script ) return init_script def start_server( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - public_key: str, - research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, - volume_ids_path_new: Union[list[dict[str, str]], None] = None, - volume_ids_path_attach: Union[list[dict[str, str]], None] = None, - additional_keys: Union[list[str], None] = None, - additional_security_group_ids: Union[list[str], None] = None, + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + public_key: str, + research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, + volume_ids_path_new: Union[list[dict[str, str]], None] = None, + volume_ids_path_attach: Union[list[dict[str, str]], None] = None, + additional_keys: Union[list[str], None] = None, + additional_security_group_ids: Union[list[str], None] = None, ) -> str: logger.info(f"Start Server {servername}") @@ 
-1261,13 +1241,13 @@ def start_server( if key_name: self.delete_keypair(key_name=key_name) - logger.exception(f"Start Server {servername} error:{e}") + logger.exception(f"Start Server {servername} error") raise DefaultException(message=str(e)) def _get_volumes_machines_start( - self, - volume_ids_path_new: list[dict[str, str]] = None, - volume_ids_path_attach: list[dict[str, str]] = None, + self, + volume_ids_path_new: list[dict[str, str]] = None, + volume_ids_path_attach: list[dict[str, str]] = None, ) -> list[Volume]: volume_ids = [] volumes = [] @@ -1281,11 +1261,11 @@ def _get_volumes_machines_start( return volumes def _get_security_groups_starting_machine( - self, - additional_security_group_ids: Union[list[str], None] = None, - project_name: Union[str, None] = None, - project_id: Union[str, None] = None, - research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, + self, + additional_security_group_ids: Union[list[str], None] = None, + project_name: Union[str, None] = None, + project_id: Union[str, None] = None, + research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, ) -> list[str]: security_groups = self._get_default_security_groups() if research_environment_metadata: @@ -1310,16 +1290,16 @@ def _get_security_groups_starting_machine( return security_groups def start_server_with_playbook( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - research_environment_metadata: ResearchEnvironmentMetadata, - volume_ids_path_new: list[dict[str, str]] = None, # type: ignore - volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore - additional_keys: list[str] = None, # type: ignore - additional_security_group_ids=None, # type: ignore + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + research_environment_metadata: ResearchEnvironmentMetadata, + volume_ids_path_new: list[dict[str, str]] = None, # type: ignore + 
volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore + additional_keys: list[str] = None, # type: ignore + additional_security_group_ids=None, # type: ignore ) -> tuple[str, str]: logger.info(f"Start Server {servername}") @@ -1429,16 +1409,16 @@ def add_udp_security_group(self, server_id): return def add_cluster_machine( - self, - cluster_id: str, - cluster_user: str, - cluster_group_id: list[str], - image_name: str, - flavor_name: str, - name: str, - key_name: str, - batch_idx: int, - worker_idx: int, + self, + cluster_id: str, + cluster_user: str, + cluster_group_id: list[str], + image_name: str, + flavor_name: str, + name: str, + key_name: str, + batch_idx: int, + worker_idx: int, ) -> str: logger.info(f"Add machine to {cluster_id}") image: Image = self.get_image(name_or_id=image_name, replace_inactive=True) diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 89efed7..468d1d3 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -1,10 +1,12 @@ import os +import random import socket import tempfile import unittest from unittest import mock from unittest.mock import MagicMock, call, patch +import pytest from openstack.block_storage.v3 import volume from openstack.block_storage.v3.limits import Limit from openstack.block_storage.v3.volume import Volume @@ -19,7 +21,8 @@ from openstack.test import fakes from oslo_utils import encodeutils - +from simple_vm_client.forc_connector.template.template import ResearchEnvironmentMetadata +from simple_vm_client.util.state_enums import VmStates, VmTaskStates from .openstack_connector.openstack_connector import OpenStackConnector from .ttypes import ( DefaultException, @@ -27,9 +30,54 @@ OpenStackConflictException, ResourceNotAvailableException, SnapshotNotFoundException, - VolumeNotFoundException, + VolumeNotFoundException, ServerNotFoundException, +) + +METADATA_EXAMPLE_NO_FORC = 
ResearchEnvironmentMetadata( + template_name="example_template", + port="8080", + wiki_link="https://example.com/wiki", + description="Example template for testing", + title="Example Template", + community_driven=True, + logo_url="https://example.com/logo.png", + info_url="https://example.com/info", + securitygroup_name="example_group", + securitygroup_description="Example security group", + securitygroup_ssh=True, + direction="inbound", + protocol="tcp", + information_for_display="Some information", + needs_forc_support=False, + min_ram=2, + min_cores=1, + is_maintained=True, + forc_versions=["1.0.0", "2.0.0"], + incompatible_versions=["3.0.0"], ) +METADATA_EXAMPLE = ResearchEnvironmentMetadata( + template_name="example_template", + port="8080", + wiki_link="https://example.com/wiki", + description="Example template for testing", + title="Example Template", + community_driven=True, + logo_url="https://example.com/logo.png", + info_url="https://example.com/info", + securitygroup_name="example_group", + securitygroup_description="Example security group", + securitygroup_ssh=True, + direction="inbound", + protocol="tcp", + information_for_display="Some information", + needs_forc_support=True, + min_ram=2, + min_cores=1, + is_maintained=True, + forc_versions=["1.0.0", "2.0.0"], + incompatible_versions=["3.0.0"], +) EXPECTED_IMAGE = image_module.Image( id="image_id_2", status="active", @@ -63,15 +111,16 @@ ), INACTIVE_IMAGE, ] +PORT_CALCULATION = "30000 + x + y * 256" DEFAULT_SECURITY_GROUPS = ["defaultSimpleVM"] -CONFIG_DATA = """ +CONFIG_DATA = f""" openstack: gateway_ip: "192.168.1.1" network: "my_network" sub_network: "my_sub_network" cloud_site: "my_cloud_site" - ssh_port_calculation: 22 - udp_port_calculation: 12345 + ssh_port_calculation: {PORT_CALCULATION} + udp_port_calculation: {PORT_CALCULATION} gateway_security_group_id: "security_group_id" production: true forc: @@ -150,8 +199,8 @@ def test_load_config_yml(self): 
self.assertEqual(self.openstack_connector.SUB_NETWORK, "my_sub_network") self.assertTrue(self.openstack_connector.PRODUCTION) self.assertEqual(self.openstack_connector.CLOUD_SITE, "my_cloud_site") - self.assertEqual(self.openstack_connector.SSH_PORT_CALCULATION, 22) - self.assertEqual(self.openstack_connector.UDP_PORT_CALCULATION, 12345) + self.assertEqual(self.openstack_connector.SSH_PORT_CALCULATION, PORT_CALCULATION) + self.assertEqual(self.openstack_connector.UDP_PORT_CALCULATION, PORT_CALCULATION) self.assertEqual( self.openstack_connector.FORC_SECURITY_GROUP_ID, "forc_security_group_id" ) @@ -402,12 +451,18 @@ def test_replace_inactive_image(self): # Assert that the method returns the replacement image self.assertEqual(result, EXPECTED_IMAGE) - @unittest.skip("Currently not working") def test_get_limits(self): compute_limits = fakes.generate_fake_resource(limits.AbsoluteLimits) volume_limits = fakes.generate_fake_resource(Limit) - self.mock_openstack_connection.get_compute_limits.return_value = compute_limits - self.mock_openstack_connection.get_volume_limits.return_value = volume_limits + compute_copy = {} + for key in compute_limits.keys(): + compute_copy[key] = random.randint(0, 10000) + + absolute_volume = volume_limits["absolute"] + for key in absolute_volume.keys(): + volume_limits["absolute"][key] = random.randint(0, 10000) + self.openstack_connector.openstack_connection.get_compute_limits.return_value = compute_copy + self.openstack_connector.openstack_connection.get_volume_limits.return_value = volume_limits self.openstack_connector.get_limits() @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") @@ -493,7 +548,7 @@ def test_get_volume_exception(self, mock_logger_exception): # Call the get_volume method and expect a VolumeNotFoundException with self.assertRaises( - Exception + Exception ): # Replace Exception with the actual exception type self.openstack_connector.get_volume(name_or_id) @@ -528,21 +583,21 @@ def 
test_delete_volume(self, mock_logger_exception, mock_logger_info): # 2. ResourceNotFound, expect VolumeNotFoundException with self.assertRaises( - VolumeNotFoundException + VolumeNotFoundException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) mock_logger_exception.assert_called_with(f"No Volume with id {volume_id}") # 3. ConflictException, expect OpenStackCloudException with self.assertRaises( - OpenStackCloudException + OpenStackCloudException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) mock_logger_exception.assert_called_with(f"Delete volume: {volume_id}) failed!") # 4. OpenStackCloudException, expect DefaultException with self.assertRaises( - DefaultException + DefaultException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) @@ -642,14 +697,14 @@ def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): # 2. ResourceNotFound, expect SnapshotNotFoundException with self.assertRaises( - SnapshotNotFoundException + SnapshotNotFoundException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) mock_logger_exception.assert_called_with(f"Snapshot not found: {snapshot_id}") # 3. ConflictException, expect OpenStackCloudException with self.assertRaises( - OpenStackCloudException + OpenStackCloudException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) mock_logger_exception.assert_called_with( @@ -658,7 +713,7 @@ def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): # 4. 
OpenStackCloudException, expect DefaultException with self.assertRaises( - DefaultException + DefaultException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) @@ -681,7 +736,7 @@ def test_get_servers(self, mock_logger_info): @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_servers_by_ids( - self, mock_logger_info, mock_logger_exception, mock_logger_error + self, mock_logger_info, mock_logger_exception, mock_logger_error ): # Prepare test data server_ids = ["id1", "id2", "id3", "id4"] @@ -1198,12 +1253,12 @@ def test_get_public_images(self, mock_logger_info): @patch.object(OpenStackConnector, "create_server") @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_add_cluster_machine( - self, - mock_logger_info, - mock_create_server, - mock_get_network, - mock_get_flavor, - mock_get_image, + self, + mock_logger_info, + mock_create_server, + mock_get_network, + mock_get_flavor, + mock_get_image, ): # Arrange cluster_id = "123" @@ -1296,7 +1351,7 @@ def test_add_udp_security_group_existing_group(self): @patch.object(OpenStackConnector, "create_security_group") @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_add_udp_security_group_new_group( - self, mock_logger_info, mock_create_security_group, mock_get_vm_ports + self, mock_logger_info, mock_create_security_group, mock_get_vm_ports ): # Test when a new UDP security group needs to be created @@ -1379,11 +1434,11 @@ def test_add_udp_security_group_already_added(self, mock_logger_info): @patch.object(OpenStackConnector, "create_userdata") @patch.object(OpenStackConnector, "delete_keypair") def test_start_server_with_playbook( - self, - mock_delete_keypair, - mock_create_userdata, - mock_get_volumes, - mock_get_security_groups_starting_machine, + 
self, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, ): server = fakes.generate_fake_resource(Server) server_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -1485,12 +1540,12 @@ def test_start_server_with_playbook( @patch.object(OpenStackConnector, "delete_keypair") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_start_server_with_playbook_exception( - self, - mock_logger_exception, - mock_delete_keypair, - mock_create_userdata, - mock_get_volumes, - mock_get_security_groups_starting_machine, + self, + mock_logger_exception, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, ): server = fakes.generate_fake_resource(Server) server_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -1553,10 +1608,10 @@ def test_start_server_with_playbook_exception( ) @patch.object(OpenStackConnector, "get_or_create_project_security_group") def test_get_security_groups_starting_machine( - self, - mock_get_project_sg, - mock_get_research_env_sg, - mock_get_default_security_groups, + self, + mock_get_project_sg, + mock_get_research_env_sg, + mock_get_default_security_groups, ): # Set up mocks fake_default_security_group = fakes.generate_fake_resource( @@ -1637,6 +1692,743 @@ def test_get_volumes_machines_start(self): expected_result = [fake_vol_1, fake_vol_2] self.assertEqual(result, expected_result) + @patch.object(OpenStackConnector, "_get_security_groups_starting_machine") + @patch.object(OpenStackConnector, "_get_volumes_machines_start") + @patch.object(OpenStackConnector, "create_userdata") + @patch.object(OpenStackConnector, "delete_keypair") + def test_start_server( + self, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, + ): + server = fakes.generate_fake_resource(Server) + server_keypair = 
fakes.generate_fake_resource(keypair.Keypair) + server_keypair.name = server.name + "_mock_project" + fake_image = fakes.generate_fake_resource(image.Image) + fake_image.status = "active" + fake_flavor = fakes.generate_fake_resource(flavor.Flavor) + fake_network = fakes.generate_fake_resource(Network) + + # Set up mocks + self.openstack_connector.openstack_connection.create_server.return_value = ( + server + ) + self.openstack_connector.openstack_connection.create_keypair.return_value = ( + server_keypair + ) + mock_get_security_groups_starting_machine.return_value = ["sg1", "sg2"] + self.openstack_connector.openstack_connection.get_image.return_value = ( + fake_image + ) + self.openstack_connector.openstack_connection.get_flavor.return_value = ( + fake_flavor + ) + self.openstack_connector.openstack_connection.network.find_network.return_value = ( + fake_network + ) + self.openstack_connector.openstack_connection.compute.find_keypair.return_value = server_keypair + self.openstack_connector.openstack_connection.compute.get_keypair.return_value = server_keypair + + mock_get_volumes.return_value = ["volume1", "volume2"] + mock_create_userdata.return_value = "userdata" + + # Set necessary input parameters + flavor_name = fake_flavor.name + image_name = fake_image.name + servername = server.name + metadata = {"project_name": "mock_project", "project_id": "mock_project_id"} + research_environment_metadata = MagicMock() + volume_ids_path_new = [ + {"openstack_id": "volume_id1"}, + {"openstack_id": "volume_id2"}, + ] + volume_ids_path_attach = [{"openstack_id": "volume_id3"}] + additional_keys = ["key1", "key2"] + additional_security_group_ids = ["sg3", "sg4"] + public_key = "public_key" + + # Call the method + result = self.openstack_connector.start_server( + flavor_name=flavor_name, + image_name=image_name, + servername=servername, + metadata=metadata, + research_environment_metadata=research_environment_metadata, + volume_ids_path_new=volume_ids_path_new, + 
volume_ids_path_attach=volume_ids_path_attach, + additional_keys=additional_keys, + additional_security_group_ids=additional_security_group_ids, + public_key=public_key + ) + + # Assertions + self.openstack_connector.openstack_connection.create_server.assert_called_once_with( + name=server.name, + image=fake_image.id, + flavor=fake_flavor.id, + network=[fake_network.id], + key_name=server_keypair.name, + meta=metadata, + volumes=["volume1", "volume2"], + userdata="userdata", + security_groups=["sg1", "sg2"], + ) + + mock_create_userdata.assert_called_once_with( + volume_ids_path_new=volume_ids_path_new, + volume_ids_path_attach=volume_ids_path_attach, + additional_keys=additional_keys, + ) + + mock_get_security_groups_starting_machine.assert_called_once_with( + additional_security_group_ids=additional_security_group_ids, + project_name="mock_project", + project_id="mock_project_id", + research_environment_metadata=research_environment_metadata, + ) + + self.openstack_connector.openstack_connection.create_keypair.assert_called_once_with( + name=server_keypair.name, public_key=public_key + ) + + mock_get_volumes.assert_called_once_with( + volume_ids_path_new=volume_ids_path_new, + volume_ids_path_attach=volume_ids_path_attach, + ) + + self.openstack_connector.openstack_connection.get_keypair.assert_called_once_with(name_or_id=server_keypair.name) + mock_delete_keypair.assert_any_call(key_name=server_keypair.name) + + # Check the result + self.assertEqual(result, server.id) + + @patch.object(OpenStackConnector, "_get_security_groups_starting_machine") + @patch.object(OpenStackConnector, "_get_volumes_machines_start") + @patch.object(OpenStackConnector, "create_userdata") + @patch.object(OpenStackConnector, "delete_keypair") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_start_server_exception( + self, + mock_logger_exception, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + 
mock_get_security_groups_starting_machine, + ): + server = fakes.generate_fake_resource(Server) + server_keypair = fakes.generate_fake_resource(keypair.Keypair) + fake_image = fakes.generate_fake_resource(image.Image) + fake_image.status = "active" + fake_flavor = fakes.generate_fake_resource(flavor.Flavor) + fake_network = fakes.generate_fake_resource(Network) + public_key = "public_key" + + # Set up mocks + self.openstack_connector.openstack_connection.create_server.return_value = ( + server + ) + self.openstack_connector.openstack_connection.create_keypair.return_value = ( + server_keypair + ) + mock_get_security_groups_starting_machine.return_value = ["sg1", "sg2"] + self.openstack_connector.openstack_connection.get_image.return_value = ( + fake_image + ) + self.openstack_connector.openstack_connection.get_flavor.return_value = ( + fake_flavor + ) + self.openstack_connector.openstack_connection.network.find_network.return_value = ( + fake_network + ) + mock_get_volumes.side_effect = OpenStackCloudException("Unit Test Error") + flavor_name = fake_flavor.name + image_name = fake_image.name + servername = server.name + metadata = {"project_name": "mock_project", "project_id": "mock_project_id"} + research_environment_metadata = MagicMock() + volume_ids_path_new = [ + {"openstack_id": "volume_id1"}, + {"openstack_id": "volume_id2"}, + ] + volume_ids_path_attach = [{"openstack_id": "volume_id3"}] + additional_keys = ["key1", "key2"] + additional_security_group_ids = ["sg3", "sg4"] + + with self.assertRaises(DefaultException): + self.openstack_connector.start_server( + flavor_name=flavor_name, + image_name=image_name, + servername=servername, + metadata=metadata, + research_environment_metadata=research_environment_metadata, + volume_ids_path_new=volume_ids_path_new, + volume_ids_path_attach=volume_ids_path_attach, + additional_keys=additional_keys, + additional_security_group_ids=additional_security_group_ids, + public_key=public_key + ) + 
mock_logger_exception.assert_any_call( + (f"Start Server {servername} error") + ) + + @patch.object(OpenStackConnector, "create_add_keys_script") + @patch.object(OpenStackConnector, "create_mount_init_script") + def test_create_userdata( + self, mock_create_mount_init_script, + mock_create_add_keys_script + ): + # Set up mocks + mock_create_add_keys_script.return_value = b"mock_add_keys_script" + mock_create_mount_init_script.return_value = b"mock_mount_script" + + # Set necessary input parameters + volume_ids_path_new = [{"openstack_id": "volume_id_new"}] + volume_ids_path_attach = [{"openstack_id": "volume_id_attach"}] + additional_keys = ["key1", "key2"] + + # Call the method + result = self.openstack_connector.create_userdata( + volume_ids_path_new, volume_ids_path_attach, additional_keys + ) + + # Assertions + mock_create_add_keys_script.assert_called_once_with(keys=additional_keys) + mock_create_mount_init_script.assert_called_once_with( + new_volumes=volume_ids_path_new, attach_volumes=volume_ids_path_attach + ) + + # Check the result + expected_result = ( + b"mock_add_keys_script\n" + + b"#!/bin/bash\npasswd -u ubuntu\n" + + b"\nmock_mount_script" + ) + self.assertEqual(result, expected_result) + + @patch.object(OpenStackConnector, "get_server") + @patch("simple_vm_client.openstack_connector.openstack_connector.sympy.symbols") + @patch("simple_vm_client.openstack_connector.openstack_connector.sympy.sympify") + def test_get_vm_ports( + self, mock_sympify, mock_symbols, mock_get_server + ): + # Set up mocks + mock_server = fakes.generate_fake_resource(server.Server) + mock_server["private_v4"] = "192.168.1.2" + + mock_get_server.return_value = mock_server + mock_sympify.return_value.evalf.return_value = 30258 # Replace with expected values + mock_symbols.side_effect = ["x", "y"] + + # Call the method + result = self.openstack_connector.get_vm_ports(mock_server.id) + + # Assertions + mock_get_server.assert_called_once_with(openstack_id=mock_server.id) + 
mock_symbols.assert_any_call("x") + mock_symbols.assert_any_call("y") + + mock_sympify.assert_called_with(self.openstack_connector.SSH_PORT_CALCULATION) + mock_sympify.return_value.evalf.assert_called_with(subs={"x": 2, "y": 1}) + + # Check the result + expected_result = {"port": "30258", "udp": "30258"} # Replace with expected values + self.assertEqual(result, expected_result) + + @patch.object(OpenStackConnector, "get_server") + @patch.object(OpenStackConnector, "_validate_server_for_deletion") + @patch.object(OpenStackConnector, "_remove_security_groups_from_server") + def test_delete_server_successful(self, mock_remove_security_groups, mock_validate_server, mock_get_server): + # Arrange + mock_server = fakes.generate_fake_resource(server.Server) + + mock_get_server.return_value = mock_server + # Act + self.openstack_connector.delete_server(mock_server.id) + + # Assert + mock_get_server.assert_called_once_with(openstack_id=mock_server.id) + mock_validate_server.assert_called_once_with(server=mock_server) + mock_remove_security_groups.assert_called_once_with(server=mock_server) + self.openstack_connector.openstack_connection.compute.delete_server.assert_called_once_with(mock_server.id, force=True) + + @patch.object(OpenStackConnector, "get_server") + def test_delete_server_exception(self, mock_get_server): + # Arrange + mock_server = fakes.generate_fake_resource(server.Server) + # Mocking the necessary methods to raise a ConflictException + mock_get_server.side_effect = ConflictException("Conflict") + # Act + + # Act and Assert + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.delete_server(mock_server.id) + + mock_get_server.assert_called_once_with(openstack_id=mock_server.id) + self.openstack_connector.openstack_connection.compute.delete_server.assert_not_called() + + @patch.object(OpenStackConnector, "get_server") + def test_delete_server_not_found_exception(self, mock_get_server): + # Arrange + # Mocking the necessary methods to 
raise a ConflictException + server_id = "not_found" + mock_get_server.return_value = None + # Act + + # Act and Assert + with self.assertRaises(ServerNotFoundException): + self.openstack_connector.delete_server(server_id) + + mock_get_server.assert_called_once_with(openstack_id=server_id) + self.openstack_connector.openstack_connection.compute.delete_server.assert_not_called() + + def test_validate_server_for_deletion(self): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + + # Act + self.openstack_connector._validate_server_for_deletion(server_mock) + + # Assert + # No exceptions should be raised if the server is found + self.assertTrue(True) + + def test_validate_server_for_deletion_conflict_exception(self): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + server_mock.task_state = "image_pending_upload" + with self.assertRaises(ConflictException): + # Act + self.openstack_connector._validate_server_for_deletion(server_mock) + + def test_remove_security_groups_from_server_no_security_groups(self): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + server_mock.security_groups = None + + # Act + self.openstack_connector._remove_security_groups_from_server(server_mock) + + # Assert + # The method should not raise any exceptions if there are no security groups + self.assertTrue(True) + + @patch.object(OpenStackConnector, "is_security_group_in_use") + def test_remove_security_groups_from_server_with_security_groups(self, mock_is_security_group_in_use): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + fake_groups = list(fakes.generate_fake_resources(security_group.SecurityGroup, count=4)) + fake_groups[2].name = "bibigrid-sec" + server_mock.security_groups = fake_groups + self.openstack_connector.openstack_connection.get_security_group.side_effect = fake_groups + mock_is_security_group_in_use.side_effect = [False, False, True, True] + + # Act + 
self.openstack_connector._remove_security_groups_from_server(server_mock) + + for group in fake_groups: + self.openstack_connector.openstack_connection.compute.remove_security_group_from_server.assert_any_call( + server=server_mock, security_group=group + ) + + with self.assertRaises(AssertionError): + self.openstack_connector.openstack_connection.delete_security_group.assert_any_call(fake_groups[2]) + self.openstack_connector.openstack_connection.delete_security_group.assert_any_call(fake_groups[3]) + + for group in fake_groups[:2]: + self.openstack_connector.openstack_connection.delete_security_group.assert_any_call(group) + + @patch.object(OpenStackConnector, "get_server") + def test_stop_server_success(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + # Act + self.openstack_connector.stop_server(openstack_id="some_openstack_id") + + # Assert + # Ensure the stop_server method is called with the correct server + self.openstack_connector.openstack_connection.compute.stop_server.assert_called_once_with( + server_mock + ) + + @patch.object(OpenStackConnector, "get_server") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_stop_server_conflict_exception(self, mock_logger_exception, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + self.openstack_connector.openstack_connection.compute.stop_server.side_effect = ConflictException("Unit Test") + # Act + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.stop_server(openstack_id="some_openstack_id") + mock_logger_exception.assert_called_once_with(f"Stop Server some_openstack_id failed!") + + @patch.object(OpenStackConnector, "get_server") + def test_reboot_server_success(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + 
mock_get_server.return_value = server_mock + # Act + self.openstack_connector.reboot_server(server_mock.id, "SOFT") + + # Assert + # Ensure the stop_server method is called with the correct server + self.openstack_connector.openstack_connection.compute.reboot_server.assert_called_once_with( + server_mock, "SOFT" + ) + + @patch.object(OpenStackConnector, "get_server") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_reboot_server_conflict_exception(self, mock_logger_exception, mock_get_server): + + self.openstack_connector.openstack_connection.compute.reboot_server.side_effect = ConflictException("Unit Test") + # Act + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.reboot_server("some_openstack_id", "SOFT") + mock_logger_exception.assert_called_once_with(f"Reboot Server some_openstack_id failed!") + + @patch.object(OpenStackConnector, "get_server") + def test_reboot_soft_server(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + self.openstack_connector.reboot_soft_server(server_mock.id) + self.openstack_connector.openstack_connection.compute.reboot_server.assert_called_once_with( + server_mock, "SOFT" + ) + + @patch.object(OpenStackConnector, "get_server") + def test_reboot_hard_server(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + self.openstack_connector.reboot_hard_server(server_mock.id) + self.openstack_connector.openstack_connection.compute.reboot_server.assert_called_once_with( + server_mock, "HARD" + ) + + @patch.object(OpenStackConnector, "get_server") + def test_resume_server_success(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + # Act + self.openstack_connector.resume_server(server_mock.id) + + # 
Assert + # Ensure the stop_server method is called with the correct server + self.openstack_connector.openstack_connection.compute.start_server.assert_called_once_with( + server_mock + ) + + @patch.object(OpenStackConnector, "get_server") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_resume_server_conflict_exception(self, mock_logger_exception, mock_get_server): + + self.openstack_connector.openstack_connection.compute.start_server.side_effect = ConflictException("Unit Test") + # Act + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.resume_server("some_openstack_id") + mock_logger_exception.assert_called_once_with(f"Resume Server some_openstack_id failed!") + + @patch.object(OpenStackConnector, "_calculate_vm_ports") + @patch.object(OpenStackConnector, "get_image") + @patch.object(OpenStackConnector, "get_flavor") + @patch.object(OpenStackConnector, "netcat") + def test_get_server(self, mock_netcat, + mock_get_flavor, mock_get_image, mock_calculate_ports): + # Arrange + openstack_id = "your_openstack_id" + server_mock = fakes.generate_fake_resource(server.Server) + server_mock.vm_state = VmStates.ACTIVE.value + image_mock = fakes.generate_fake_resource(image.Image) + server_mock.image = image_mock + flavor_mock = fakes.generate_fake_resource(flavor.Flavor) + server_mock.flavor = flavor_mock + + # Mocking the methods and attributes + self.openstack_connector.openstack_connection.get_server_by_id.return_value = server_mock + mock_get_image.return_value = image_mock + mock_get_flavor.return_value = flavor_mock + mock_calculate_ports.return_value = (30111, 30111) + mock_netcat.return_value = True # Assuming SSH connection is successful + + # Act + self.openstack_connector.get_server(openstack_id) + + # Assert + self.openstack_connector.openstack_connection.get_server_by_id.assert_called_once_with(id=openstack_id) + mock_calculate_ports.assert_called_once_with(server=server_mock) + 
mock_netcat.assert_called_once_with(host=self.openstack_connector.GATEWAY_IP, port=30111) + mock_get_image.assert_called_once_with( + name_or_id=image_mock.id, + ignore_not_active=True, + ignore_not_found=True, + ) + mock_get_flavor.assert_called_once_with(name_or_id=flavor_mock.id) + mock_netcat.return_value = False # Assuming SSH connection is successful + # Act + result_server = self.openstack_connector.get_server(openstack_id) + self.assertEqual(result_server.task_state, VmTaskStates.CHECKING_SSH_CONNECTION.value) + + def test_get_server_not_found(self): + self.openstack_connector.openstack_connection.get_server_by_id.return_value = None + + with self.assertRaises(ServerNotFoundException): + self.openstack_connector.get_server("someid") + + def test_get_server_openstack_exception(self): + self.openstack_connector.openstack_connection.get_server_by_id.side_effect = OpenStackCloudException("UNit Test") + + with self.assertRaises(DefaultException): + self.openstack_connector.get_server("someid") + + @patch.object(OpenStackConnector, "get_server") + def test_set_server_metadata_success(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + metadata = {"data": "123"} + # Act + self.openstack_connector.set_server_metadata(server_mock.id, metadata) + + # Assert + # Ensure the stop_server method is called with the correct server + self.openstack_connector.openstack_connection.compute.set_server_metadata.assert_called_once_with( + server_mock, metadata + ) + + @patch.object(OpenStackConnector, "get_server") + def test_set_server_metadata_exception(self, mock_get_server): + # Arrange + server_mock = fakes.generate_fake_resource(server.Server) + mock_get_server.return_value = server_mock + metadata = {"data": "123"} + self.openstack_connector.openstack_connection.compute.set_server_metadata.side_effect = OpenStackCloudException("Unit Tests") + # Act + with 
self.assertRaises(DefaultException): + self.openstack_connector.set_server_metadata(server_mock.id, metadata) + + @patch.object(OpenStackConnector, "get_server") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_reboot_server_conflict_exception(self, mock_logger_exception, mock_get_server): + + self.openstack_connector.openstack_connection.compute.reboot_server.side_effect = ConflictException("Unit Test") + # Act + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.reboot_server("some_openstack_id", "SOFT") + mock_logger_exception.assert_called_once_with(f"Reboot Server some_openstack_id failed!") + + def test_exist_server_true(self): + server_mock = fakes.generate_fake_resource(server.Server) + + self.openstack_connector.openstack_connection.compute.find_server.return_value = server_mock + + result = self.openstack_connector.exist_server(server_mock.name) + self.assertTrue(result) + + def test_exist_server_false(self): + server_mock = fakes.generate_fake_resource(server.Server) + + self.openstack_connector.openstack_connection.compute.find_server.return_value = None + + result = self.openstack_connector.exist_server(server_mock.name) + self.assertFalse(result) + + def test_get_or_create_project_security_group_exists(self): + # Mock the get_security_group method to simulate an existing security group + existing_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + + self.openstack_connector.openstack_connection.get_security_group.return_value = existing_security_group + + # Call the method + result = self.openstack_connector.get_or_create_project_security_group("project_name", "project_id") + + # Assertions + self.assertEqual(result, existing_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_not_called() + + def test_get_or_create_project_security_group_create_new(self): + # Mock the get_security_group method to simulate a 
non-existing security group + self.openstack_connector.openstack_connection.get_security_group.return_value = None + + # Mock the create_security_group method to simulate creating a new security group + new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + self.openstack_connector.openstack_connection.create_security_group.return_value = new_security_group + + # Call the method + result = self.openstack_connector.get_or_create_project_security_group("project_name", "project_id") + + # Assertions + self.assertEqual(result, new_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_called_once() + + def test_get_or_create_vm_security_group_exist(self): + # Mock the get_security_group method to simulate an existing security group + existing_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + + self.openstack_connector.openstack_connection.get_security_group.return_value = existing_security_group + + # Call the method + result = self.openstack_connector.get_or_create_vm_security_group("server_id") + + # Assertions + self.assertEqual(result, existing_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_not_called() + + def test_get_or_create_vm_security_group_create_new(self): + # Mock the get_security_group method to simulate a non-existing security group + self.openstack_connector.openstack_connection.get_security_group.return_value = None + + # Mock the create_security_group method to simulate creating a new security group + new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + self.openstack_connector.openstack_connection.create_security_group.return_value = new_security_group + + # Call the method + result = self.openstack_connector.get_or_create_vm_security_group("openstack_id") + + # Assertions + self.assertEqual(result, new_security_group.id) + 
self.openstack_connector.openstack_connection.create_security_group.assert_called_once() + + def test_get_or_create_research_environment_security_group_exist(self): + # Mock the get_security_group method to simulate an existing security group + existing_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + + self.openstack_connector.openstack_connection.get_security_group.return_value = existing_security_group + + # Call the method + result = self.openstack_connector.get_or_create_research_environment_security_group(resenv_metadata=METADATA_EXAMPLE) + + # Assertions + self.assertEqual(result, existing_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_not_called() + + def test_get_or_create_research_environment_no_forc_support(self): + # Mock the get_security_group method to simulate a non-existing security group + self.openstack_connector.get_or_create_research_environment_security_group(resenv_metadata=METADATA_EXAMPLE_NO_FORC) + self.openstack_connector.openstack_connection.get_security_group.assert_not_called() + + def test_get_or_create_research_environment_security_group_new(self): + # Mock the get_security_group method to simulate a non-existing security group + self.openstack_connector.openstack_connection.get_security_group.return_value = None + + # Mock the create_security_group method to simulate creating a new security group + new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + self.openstack_connector.openstack_connection.create_security_group.return_value = new_security_group + + # Call the method + result = self.openstack_connector.get_or_create_research_environment_security_group(resenv_metadata=METADATA_EXAMPLE) + + # Assertions + self.assertEqual(result, new_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_called_once() + + def test_is_security_group_in_use_instances(self): + # Mock the compute.servers 
method to simulate instances using the security group + instances = [{"id": "instance_id", "name": "instance_name"}] + self.openstack_connector.openstack_connection.compute.servers = MagicMock(return_value=instances) + + # Call the method + result = self.openstack_connector.is_security_group_in_use("security_group_id") + + # Assertions + self.assertTrue(result) + + def test_is_security_group_in_use_ports(self): + # Mock the network.ports method to simulate ports associated with the security group + ports = [{"id": "port_id", "name": "port_name"}] + self.openstack_connector.openstack_connection.network.ports = MagicMock(return_value=ports) + + # Call the method + result = self.openstack_connector.is_security_group_in_use("security_group_id") + + # Assertions + self.assertTrue(result) + + def test_is_security_group_in_use_load_balancers(self): + # Mock the network.load_balancers method to simulate load balancers associated with the security group + load_balancers = [{"id": "lb_id", "name": "lb_name"}] + self.openstack_connector.openstack_connection.network.load_balancers = MagicMock(return_value=load_balancers) + + # Call the method + result = self.openstack_connector.is_security_group_in_use("security_group_id") + + # Assertions + self.assertTrue(result) + + def test_is_security_group_not_in_use(self): + # Mock both compute.servers and network.ports methods to simulate no usage of the security group + self.openstack_connector.openstack_connection.compute.servers = MagicMock(return_value=[]) + self.openstack_connector.openstack_connection.network.ports = MagicMock(return_value=[]) + self.openstack_connector.openstack_connection.network.load_balancers = MagicMock(return_value=[]) + + + # Call the method + result = self.openstack_connector.is_security_group_in_use("security_group_id") + + # Assertions + self.assertFalse(result) + + + def test_create_security_group(self): + # Mock the get_security_group method to simulate non-existing security group + 
self.openstack_connector.openstack_connection.get_security_group.return_value = None + + # Mock the create_security_group method to return a fake SecurityGroup + fake_sg = fakes.generate_fake_resource(security_group.SecurityGroup) + self.openstack_connector.openstack_connection.create_security_group.return_value =fake_sg + + # Call the method + result = self.openstack_connector.create_security_group( + name=fake_sg.name, + udp_port=1234, + ssh=True, + udp=True, + description=fake_sg.description, + research_environment_metadata=METADATA_EXAMPLE, + ) + + # Assertions + self.assertEqual(result, fake_sg) + self.openstack_connector.openstack_connection.create_security_group.assert_called_once_with(name=fake_sg.name, description=fake_sg.description) + self.openstack_connector.openstack_connection.create_security_group_rule.assert_any_call( + direction="ingress", + protocol="udp", + port_range_max=1234, + port_range_min=1234, + secgroup_name_or_id=fake_sg.id, + remote_group_id=self.openstack_connector.GATEWAY_SECURITY_GROUP_ID, + ) + self.openstack_connector.openstack_connection.create_security_group_rule.assert_any_call( + direction="ingress", + protocol="udp", + ethertype="IPv6", + port_range_max=1234, + port_range_min=1234, + secgroup_name_or_id=fake_sg.id, + remote_group_id=self.openstack_connector.GATEWAY_SECURITY_GROUP_ID, + ) + + self.openstack_connector.openstack_connection.create_security_group_rule.assert_any_call( + direction="ingress", + protocol="tcp", + port_range_max=22, + port_range_min=22, + secgroup_name_or_id=fake_sg.id, + remote_group_id=self.openstack_connector.GATEWAY_SECURITY_GROUP_ID, + ) + self.openstack_connector.openstack_connection.create_security_group_rule.assert_any_call( + direction="ingress", + protocol="tcp", + ethertype="IPv6", + port_range_max=22, + port_range_min=22, + secgroup_name_or_id=fake_sg.id, + remote_group_id=self.openstack_connector.GATEWAY_SECURITY_GROUP_ID, + ) if __name__ == "__main__": unittest.main() From 
776dde9ce5fe2b4b977d384b50286664e8683f8e Mon Sep 17 00:00:00 2001 From: dweinholz Date: Wed, 3 Jan 2024 16:04:08 +0100 Subject: [PATCH 31/39] added more tests openstack --- .../openstack_connector.py | 193 +++++++++--------- 1 file changed, 98 insertions(+), 95 deletions(-) diff --git a/simple_vm_client/openstack_connector/openstack_connector.py b/simple_vm_client/openstack_connector/openstack_connector.py index 187479e..c850f70 100644 --- a/simple_vm_client/openstack_connector/openstack_connector.py +++ b/simple_vm_client/openstack_connector/openstack_connector.py @@ -141,7 +141,7 @@ def load_env_config(self) -> None: sys.exit(1) self.USE_APPLICATION_CREDENTIALS = ( - os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true" + os.environ.get("USE_APPLICATION_CREDENTIALS", "False").lower() == "true" ) if self.USE_APPLICATION_CREDENTIALS: @@ -183,15 +183,15 @@ def load_env_config(self) -> None: self.PROJECT_DOMAIN_ID = os.environ["OS_PROJECT_DOMAIN_ID"] def create_server( - self, - name: str, - image_id: str, - flavor_id: str, - network_id: str, - userdata: str, - key_name: str, - metadata: dict[str, str], - security_groups: list[str], + self, + name: str, + image_id: str, + flavor_id: str, + network_id: str, + userdata: str, + key_name: str, + metadata: dict[str, str], + security_groups: list[str], ) -> Server: logger.info( f"Create Server:\n\tname: {name}\n\timage_id:{image_id}\n\tflavor_id:{flavor_id}\n\tmetadata:{metadata}" @@ -233,7 +233,7 @@ def delete_volume(self, volume_id: str) -> None: raise DefaultException(message=e.message) def create_volume_snapshot( - self, volume_id: str, name: str, description: str + self, volume_id: str, name: str, description: str ) -> str: try: logger.info(f"Create Snapshot for Volume {volume_id}") @@ -276,7 +276,7 @@ def delete_volume_snapshot(self, snapshot_id: str) -> None: raise DefaultException(message=e.message) def create_volume_by_source_volume( - self, volume_name: str, metadata: dict[str, str], 
source_volume_id: str + self, volume_name: str, metadata: dict[str, str], source_volume_id: str ) -> Volume: logger.info(f"Creating volume from source volume with id {source_volume_id}") try: @@ -292,7 +292,7 @@ def create_volume_by_source_volume( raise ResourceNotAvailableException(message=e.message) def create_volume_by_volume_snap( - self, volume_name: str, metadata: dict[str, str], volume_snap_id: str + self, volume_name: str, metadata: dict[str, str], volume_snap_id: str ) -> Volume: logger.info(f"Creating volume from volume snapshot with id {volume_snap_id}") try: @@ -330,7 +330,7 @@ def get_servers_by_ids(self, ids: list[str]) -> list[Server]: return servers def attach_volume_to_server( - self, openstack_id: str, volume_id: str + self, openstack_id: str, volume_id: str ) -> dict[str, str]: server = self.get_server(openstack_id=openstack_id) volume = self.get_volume(name_or_id=volume_id) @@ -371,7 +371,7 @@ def resize_volume(self, volume_id: str, size: int) -> None: raise DefaultException(message=str(e)) def create_volume( - self, volume_name: str, volume_storage: int, metadata: dict[str, str] + self, volume_name: str, volume_storage: int, metadata: dict[str, str] ) -> Volume: logger.info(f"Creating volume with {volume_storage} GB storage") try: @@ -488,9 +488,9 @@ def get_active_image_by_os_version(self, os_version: str, os_distro: str) -> Ima image_os_distro = metadata.get("os_distro", None) base_image_ref = metadata.get("base_image_ref", None) if ( - os_version == image_os_version - and image.status == "active" - and base_image_ref is None + os_version == image_os_version + and image.status == "active" + and base_image_ref is None ): if os_distro and os_distro == image_os_distro: return image @@ -502,11 +502,11 @@ def get_active_image_by_os_version(self, os_version: str, os_distro: str) -> Ima ) def get_image( - self, - name_or_id: str, - replace_inactive: bool = False, - ignore_not_active: bool = False, - ignore_not_found: bool = False, + self, + 
name_or_id: str, + replace_inactive: bool = False, + ignore_not_active: bool = False, + ignore_not_found: bool = False, ) -> Image: logger.info(f"Get Image {name_or_id}") @@ -530,12 +530,12 @@ def get_image( return image def create_snapshot( - self, - openstack_id: str, - name: str, - username: str, - base_tags: list[str], - description: str, + self, + openstack_id: str, + name: str, + username: str, + base_tags: list[str], + description: str, ) -> str: logger.info( f"Create Snapshot from Instance {openstack_id} with name {name} for {username}" @@ -647,9 +647,9 @@ def get_gateway_ip(self) -> dict[str, str]: return {"gateway_ip": self.GATEWAY_IP} def create_mount_init_script( - self, - new_volumes: list[dict[str, str]] = None, # type: ignore - attach_volumes: list[dict[str, str]] = None, # type: ignore + self, + new_volumes: list[dict[str, str]] = None, # type: ignore + attach_volumes: list[dict[str, str]] = None, # type: ignore ) -> str: logger.info(f"Create init script for volume ids:{new_volumes}") if not new_volumes and not attach_volumes: @@ -730,7 +730,7 @@ def delete_security_group_rule(self, openstack_id): ) def open_port_range_for_vm_in_project( - self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" + self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" ): server: Server = self.openstack_connection.get_server_by_id(id=openstack_id) if server is None: @@ -779,13 +779,13 @@ def open_port_range_for_vm_in_project( raise OpenStackConflictException(message=e.message) def create_security_group( - self, - name: str, - udp_port: int = None, # type: ignore - ssh: bool = True, - udp: bool = False, - description: str = "", - research_environment_metadata: ResearchEnvironmentMetadata = None, + self, + name: str, + udp_port: int = None, # type: ignore + ssh: bool = True, + udp: bool = False, + description: str = "", + research_environment_metadata: ResearchEnvironmentMetadata = None, ) -> SecurityGroup: 
logger.info(f"Create new security group {name}") sec: SecurityGroup = self.openstack_connection.get_security_group( @@ -894,7 +894,7 @@ def is_security_group_in_use(self, security_group_id): return False def get_or_create_research_environment_security_group( - self, resenv_metadata: ResearchEnvironmentMetadata + self, resenv_metadata: ResearchEnvironmentMetadata ): if not resenv_metadata.needs_forc_support: return None @@ -915,7 +915,8 @@ def get_or_create_research_environment_security_group( ) new_security_group = self.openstack_connection.create_security_group( - name=resenv_metadata.securitygroup_name, description=resenv_metadata.description + name=resenv_metadata.securitygroup_name, + description=resenv_metadata.description, ) self.openstack_connection.network.create_security_group_rule( direction=resenv_metadata.direction, @@ -1074,16 +1075,18 @@ def _remove_security_groups_from_server(self, server: Server) -> None: if security_groups is not None: for sg in security_groups: - sec = self.openstack_connection.get_security_group(name_or_id=sg["name"]) + sec = self.openstack_connection.get_security_group( + name_or_id=sg["name"] + ) logger.info(f"Remove security group {sec.id} from server {server.id}") self.openstack_connection.compute.remove_security_group_from_server( server=server, security_group=sec ) if ( - sg["name"] != self.DEFAULT_SECURITY_GROUP_NAME - and ("bibigrid" not in sec.name or "master" not in server.name) - and not self.is_security_group_in_use(security_group_id=sec.id) + sg["name"] != self.DEFAULT_SECURITY_GROUP_NAME + and ("bibigrid" not in sec.name or "master" not in server.name) + and not self.is_security_group_in_use(security_group_id=sec.id) ): logger.info(f"Delete security group {sec}") @@ -1145,10 +1148,10 @@ def get_vm_ports(self, openstack_id: str) -> dict[str, str]: return {"port": str(ssh_port), "udp": str(udp_port)} def create_userdata( - self, - volume_ids_path_new: list[dict[str, str]], - volume_ids_path_attach: list[dict[str, 
str]], - additional_keys: list[str], + self, + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_keys: list[str], ) -> str: unlock_ubuntu_user_script = "#!/bin/bash\npasswd -u ubuntu\n" unlock_ubuntu_user_script_encoded = encodeutils.safe_encode( @@ -1159,9 +1162,9 @@ def create_userdata( if additional_keys: add_key_script = self.create_add_keys_script(keys=additional_keys) init_script = ( - add_key_script - + encodeutils.safe_encode("\n".encode("utf-8")) - + init_script + add_key_script + + encodeutils.safe_encode("\n".encode("utf-8")) + + init_script ) if volume_ids_path_new or volume_ids_path_attach: mount_script = self.create_mount_init_script( @@ -1169,25 +1172,25 @@ def create_userdata( attach_volumes=volume_ids_path_attach, ) init_script = ( - init_script - + encodeutils.safe_encode("\n".encode("utf-8")) - + mount_script + init_script + + encodeutils.safe_encode("\n".encode("utf-8")) + + mount_script ) return init_script def start_server( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - public_key: str, - research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, - volume_ids_path_new: Union[list[dict[str, str]], None] = None, - volume_ids_path_attach: Union[list[dict[str, str]], None] = None, - additional_keys: Union[list[str], None] = None, - additional_security_group_ids: Union[list[str], None] = None, + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + public_key: str, + research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, + volume_ids_path_new: Union[list[dict[str, str]], None] = None, + volume_ids_path_attach: Union[list[dict[str, str]], None] = None, + additional_keys: Union[list[str], None] = None, + additional_security_group_ids: Union[list[str], None] = None, ) -> str: logger.info(f"Start Server {servername}") @@ -1245,9 +1248,9 @@ def start_server( raise 
DefaultException(message=str(e)) def _get_volumes_machines_start( - self, - volume_ids_path_new: list[dict[str, str]] = None, - volume_ids_path_attach: list[dict[str, str]] = None, + self, + volume_ids_path_new: list[dict[str, str]] = None, + volume_ids_path_attach: list[dict[str, str]] = None, ) -> list[Volume]: volume_ids = [] volumes = [] @@ -1261,11 +1264,11 @@ def _get_volumes_machines_start( return volumes def _get_security_groups_starting_machine( - self, - additional_security_group_ids: Union[list[str], None] = None, - project_name: Union[str, None] = None, - project_id: Union[str, None] = None, - research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, + self, + additional_security_group_ids: Union[list[str], None] = None, + project_name: Union[str, None] = None, + project_id: Union[str, None] = None, + research_environment_metadata: Union[ResearchEnvironmentMetadata, None] = None, ) -> list[str]: security_groups = self._get_default_security_groups() if research_environment_metadata: @@ -1290,16 +1293,16 @@ def _get_security_groups_starting_machine( return security_groups def start_server_with_playbook( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - research_environment_metadata: ResearchEnvironmentMetadata, - volume_ids_path_new: list[dict[str, str]] = None, # type: ignore - volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore - additional_keys: list[str] = None, # type: ignore - additional_security_group_ids=None, # type: ignore + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + research_environment_metadata: ResearchEnvironmentMetadata, + volume_ids_path_new: list[dict[str, str]] = None, # type: ignore + volume_ids_path_attach: list[dict[str, str]] = None, # type: ignore + additional_keys: list[str] = None, # type: ignore + additional_security_group_ids=None, # type: ignore ) -> tuple[str, str]: logger.info(f"Start Server 
{servername}") @@ -1409,16 +1412,16 @@ def add_udp_security_group(self, server_id): return def add_cluster_machine( - self, - cluster_id: str, - cluster_user: str, - cluster_group_id: list[str], - image_name: str, - flavor_name: str, - name: str, - key_name: str, - batch_idx: int, - worker_idx: int, + self, + cluster_id: str, + cluster_user: str, + cluster_group_id: list[str], + image_name: str, + flavor_name: str, + name: str, + key_name: str, + batch_idx: int, + worker_idx: int, ) -> str: logger.info(f"Add machine to {cluster_id}") image: Image = self.get_image(name_or_id=image_name, replace_inactive=True) From b77e1c5eddfaa4c7ca7b763145abd2aee6bb8be4 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 4 Jan 2024 09:35:11 +0100 Subject: [PATCH 32/39] feat(UnitTests):finished openstack connector --- .../openstack_connector.py | 99 +-- simple_vm_client/test_openstack_connector.py | 780 +++++++++++++++--- 2 files changed, 687 insertions(+), 192 deletions(-) diff --git a/simple_vm_client/openstack_connector/openstack_connector.py b/simple_vm_client/openstack_connector/openstack_connector.py index c850f70..23d4ed5 100644 --- a/simple_vm_client/openstack_connector/openstack_connector.py +++ b/simple_vm_client/openstack_connector/openstack_connector.py @@ -79,6 +79,7 @@ def __init__(self, config_file: str): self.USE_APPLICATION_CREDENTIALS: bool = False self.load_env_config() + print("loading config file") self.load_config_yml(config_file) try: @@ -462,15 +463,9 @@ def get_flavor(self, name_or_id: str) -> Flavor: def get_flavors(self) -> list[Flavor]: logger.info("Get Flavors") - if self.openstack_connection: - flavors: list[Flavor] = self.openstack_connection.list_flavors( - get_extra=True - ) - logger.info([flav["name"] for flav in flavors]) - return flavors - else: - logger.info("no connection") - return [] + flavors: list[Flavor] = self.openstack_connection.list_flavors(get_extra=True) + logger.info([flav["name"] for flav in flavors]) + return flavors def 
get_servers_by_bibigrid_id(self, bibigrid_id: str) -> list[Server]: logger.info(f"Get Servery by Bibigrid id: {bibigrid_id}") @@ -575,58 +570,44 @@ def delete_image(self, image_id: str) -> None: def get_public_images(self) -> list[Image]: logger.info("Get public images") - if self.openstack_connection: - # Use compute.images() method with filters and extra_info - images = self.openstack_connection.image.images( - status="active", visibility="public" - ) - # Use list comprehension to filter images based on tags - images = [ - image for image in images if "tags" in image and len(image["tags"]) > 0 - ] - image_names = [image.name for image in images] - logger.info(f"Found public images - {image_names}") - - return images + # Use compute.images() method with filters and extra_info + images = self.openstack_connection.image.images( + status="active", visibility="public" + ) + # Use list comprehension to filter images based on tags + images = [ + image for image in images if "tags" in image and len(image["tags"]) > 0 + ] + image_names = [image.name for image in images] + logger.info(f"Found public images - {image_names}") - else: - logger.info("no connection") - return [] + return images def get_private_images(self) -> list[Image]: logger.info("Get private images") - if self.openstack_connection: - # Use compute.images() method with filters and extra_info - images = self.openstack_connection.image.images( - status="active", visibility="private" - ) - # Use list comprehension to filter images based on tags - images = [ - image for image in images if "tags" in image and len(image["tags"]) > 0 - ] - image_names = [image.name for image in images] - logger.info(f"Found private images - {image_names}") + # Use compute.images() method with filters and extra_info + images = self.openstack_connection.image.images( + status="active", visibility="private" + ) + # Use list comprehension to filter images based on tags + images = [ + image for image in images if "tags" in image and 
len(image["tags"]) > 0 + ] + image_names = [image.name for image in images] + logger.info(f"Found private images - {image_names}") - return images - else: - logger.info("no connection") - return [] + return images def get_images(self) -> list[Image]: logger.info("Get Images") - if self.openstack_connection: - images = self.openstack_connection.image.images(status="active") - images = [ - image for image in images if "tags" in image and len(image["tags"]) > 0 - ] - image_names = [image.name for image in images] - - logger.info(f"Found images - {image_names}") + images = self.openstack_connection.image.images(status="active") + images = [ + image for image in images if "tags" in image and len(image["tags"]) > 0 + ] + image_names = [image.name for image in images] - return images - else: - logger.info("no connection") - return [] + logger.info(f"Found images - {image_names}") + return images def get_calculation_values(self) -> dict[str, str]: logger.info("Get Client Calculation Values") @@ -712,11 +693,12 @@ def create_or_get_default_ssh_security_group(self): if not sec: logger.info("Default SimpleVM SSH Security group not found... 
Creating") - self.create_security_group( + sec = self.create_security_group( name=self.DEFAULT_SECURITY_GROUP_NAME, ssh=True, description="Default SSH SimpleVM Security Group", ) + return sec def delete_security_group_rule(self, openstack_id): logger.info(f"Delete Security Group Rule -- {openstack_id}") @@ -730,15 +712,10 @@ def delete_security_group_rule(self, openstack_id): ) def open_port_range_for_vm_in_project( - self, range_start, range_stop, openstack_id, ethertype="IPV4", protocol="TCP" + self, range_start, range_stop, openstack_id, ethertype="IPv4", protocol="TCP" ): - server: Server = self.openstack_connection.get_server_by_id(id=openstack_id) - if server is None: - logger.exception(f"Instance {openstack_id} not found") - raise ServerNotFoundException( - message=f"Instance {openstack_id} not found", - name_or_id=openstack_id, - ) + server: Server = self.get_server(openstack_id=openstack_id) + project_name = server.metadata.get("project_name") project_id = server.metadata.get("project_id") diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 468d1d3..00d3237 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -6,7 +6,6 @@ from unittest import mock from unittest.mock import MagicMock, call, patch -import pytest from openstack.block_storage.v3 import volume from openstack.block_storage.v3.limits import Limit from openstack.block_storage.v3.volume import Volume @@ -16,21 +15,26 @@ from openstack.exceptions import ConflictException, ResourceFailure, ResourceNotFound from openstack.image.v2 import image from openstack.image.v2 import image as image_module -from openstack.network.v2 import security_group +from openstack.network.v2 import security_group, security_group_rule from openstack.network.v2.network import Network from openstack.test import fakes from oslo_utils import encodeutils -from simple_vm_client.forc_connector.template.template 
import ResearchEnvironmentMetadata +from simple_vm_client.forc_connector.template.template import ( + ResearchEnvironmentMetadata, +) from simple_vm_client.util.state_enums import VmStates, VmTaskStates + from .openstack_connector.openstack_connector import OpenStackConnector from .ttypes import ( DefaultException, + FlavorNotFoundException, ImageNotFoundException, OpenStackConflictException, ResourceNotAvailableException, + ServerNotFoundException, SnapshotNotFoundException, - VolumeNotFoundException, ServerNotFoundException, + VolumeNotFoundException, ) METADATA_EXAMPLE_NO_FORC = ResearchEnvironmentMetadata( @@ -192,15 +196,19 @@ def test_load_config_yml(self): # Call the load_config_yml method with the temporary file path self.openstack_connector.load_config_yml(temp_file.name) - + os.remove(temp_file.name) # Assert that the configuration attributes are set correctly self.assertEqual(self.openstack_connector.GATEWAY_IP, "192.168.1.1") self.assertEqual(self.openstack_connector.NETWORK, "my_network") self.assertEqual(self.openstack_connector.SUB_NETWORK, "my_sub_network") self.assertTrue(self.openstack_connector.PRODUCTION) self.assertEqual(self.openstack_connector.CLOUD_SITE, "my_cloud_site") - self.assertEqual(self.openstack_connector.SSH_PORT_CALCULATION, PORT_CALCULATION) - self.assertEqual(self.openstack_connector.UDP_PORT_CALCULATION, PORT_CALCULATION) + self.assertEqual( + self.openstack_connector.SSH_PORT_CALCULATION, PORT_CALCULATION + ) + self.assertEqual( + self.openstack_connector.UDP_PORT_CALCULATION, PORT_CALCULATION + ) self.assertEqual( self.openstack_connector.FORC_SECURITY_GROUP_ID, "forc_security_group_id" ) @@ -394,6 +402,21 @@ def test_get_images(self, mock_logger_info): # Assert that the method returns the expected result self.assertEqual(result, IMAGES) + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") + def test_get_private_images(self, mock_logger_info): + # Configure the 
mock_openstack_connection.image.images to return the fake images + self.mock_openstack_connection.image.images.return_value = IMAGES + + # Call the method + result = self.openstack_connector.get_private_images() + mock_logger_info.assert_any_call("Get private images") + image_names = [image.name for image in IMAGES] + + mock_logger_info.assert_any_call(f"Found private images - {image_names}") + + # Assert that the method returns the expected result + self.assertEqual(result, IMAGES) + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_active_image_by_os_version(self, mock_logger_info): # Generate a set of fake images with different properties @@ -412,6 +435,10 @@ def test_get_active_image_by_os_version(self, mock_logger_info): # Assert that the method returns the expected image self.assertEqual(result, EXPECTED_IMAGE) + result = self.openstack_connector.get_active_image_by_os_version( + os_version=os_version, os_distro=None + ) + self.assertEqual(result, EXPECTED_IMAGE) @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_active_image_by_os_version_not_found_exception(self, mock_logger_info): @@ -461,8 +488,12 @@ def test_get_limits(self): absolute_volume = volume_limits["absolute"] for key in absolute_volume.keys(): volume_limits["absolute"][key] = random.randint(0, 10000) - self.openstack_connector.openstack_connection.get_compute_limits.return_value = compute_copy - self.openstack_connector.openstack_connection.get_volume_limits.return_value = volume_limits + self.openstack_connector.openstack_connection.get_compute_limits.return_value = ( + compute_copy + ) + self.openstack_connector.openstack_connection.get_volume_limits.return_value = ( + volume_limits + ) self.openstack_connector.get_limits() @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") @@ -548,7 +579,7 @@ def test_get_volume_exception(self, mock_logger_exception): # Call the get_volume method 
and expect a VolumeNotFoundException with self.assertRaises( - Exception + Exception ): # Replace Exception with the actual exception type self.openstack_connector.get_volume(name_or_id) @@ -583,21 +614,21 @@ def test_delete_volume(self, mock_logger_exception, mock_logger_info): # 2. ResourceNotFound, expect VolumeNotFoundException with self.assertRaises( - VolumeNotFoundException + VolumeNotFoundException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) mock_logger_exception.assert_called_with(f"No Volume with id {volume_id}") # 3. ConflictException, expect OpenStackCloudException with self.assertRaises( - OpenStackCloudException + OpenStackCloudException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) mock_logger_exception.assert_called_with(f"Delete volume: {volume_id}) failed!") # 4. OpenStackCloudException, expect DefaultException with self.assertRaises( - DefaultException + DefaultException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume(volume_id) @@ -697,14 +728,14 @@ def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): # 2. ResourceNotFound, expect SnapshotNotFoundException with self.assertRaises( - SnapshotNotFoundException + SnapshotNotFoundException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) mock_logger_exception.assert_called_with(f"Snapshot not found: {snapshot_id}") # 3. ConflictException, expect OpenStackCloudException with self.assertRaises( - OpenStackCloudException + OpenStackCloudException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) mock_logger_exception.assert_called_with( @@ -713,7 +744,7 @@ def test_delete_volume_snapshot(self, mock_logger_exception, mock_logger_info): # 4. 
OpenStackCloudException, expect DefaultException with self.assertRaises( - DefaultException + DefaultException ): # Replace Exception with the actual exception type self.openstack_connector.delete_volume_snapshot(snapshot_id) @@ -736,7 +767,7 @@ def test_get_servers(self, mock_logger_info): @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_servers_by_ids( - self, mock_logger_info, mock_logger_exception, mock_logger_error + self, mock_logger_info, mock_logger_exception, mock_logger_error ): # Prepare test data server_ids = ["id1", "id2", "id3", "id4"] @@ -924,6 +955,13 @@ def test_create_volume(self, mock_logger_info, mock_logger_exception): f"Trying to create volume with {volume_storage} GB failed", exc_info=True ) + def test_network_not_found(self): + self.openstack_connector.openstack_connection.network.find_network.return_value = ( + None + ) + with self.assertRaises(Exception): + self.openstack_connector.get_network() + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_get_network(self, mock_logger_exception): with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: @@ -1097,6 +1135,11 @@ def test_netcat(self, mock_logger_info, mock_socket): f"Checking SSH Connection {host}:{port} Result = 0" ) + def test_get_flavor_exception(self): + self.openstack_connector.openstack_connection.get_flavor.return_value = None + with self.assertRaises(FlavorNotFoundException): + self.openstack_connector.get_flavor("not_found") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_get_flavor(self, mock_logger_info): # Replace with the actual flavor name or ID @@ -1204,6 +1247,11 @@ def test_create_snapshot(self, mock_logger_info, mock_logger_exception): openstack_id, name, username, base_tags, description ) + def test_delete_image_not_found(self): + 
self.openstack_connector.openstack_connection.get_image.return_value = None + with self.assertRaises(Exception): + self.openstack_connector.delete_image("not_found") + @mock.patch( "simple_vm_client.openstack_connector.openstack_connector.logger.exception" ) @@ -1253,12 +1301,12 @@ def test_get_public_images(self, mock_logger_info): @patch.object(OpenStackConnector, "create_server") @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_add_cluster_machine( - self, - mock_logger_info, - mock_create_server, - mock_get_network, - mock_get_flavor, - mock_get_image, + self, + mock_logger_info, + mock_create_server, + mock_get_network, + mock_get_flavor, + mock_get_image, ): # Arrange cluster_id = "123" @@ -1351,7 +1399,7 @@ def test_add_udp_security_group_existing_group(self): @patch.object(OpenStackConnector, "create_security_group") @mock.patch("simple_vm_client.openstack_connector.openstack_connector.logger.info") def test_add_udp_security_group_new_group( - self, mock_logger_info, mock_create_security_group, mock_get_vm_ports + self, mock_logger_info, mock_create_security_group, mock_get_vm_ports ): # Test when a new UDP security group needs to be created @@ -1434,11 +1482,11 @@ def test_add_udp_security_group_already_added(self, mock_logger_info): @patch.object(OpenStackConnector, "create_userdata") @patch.object(OpenStackConnector, "delete_keypair") def test_start_server_with_playbook( - self, - mock_delete_keypair, - mock_create_userdata, - mock_get_volumes, - mock_get_security_groups_starting_machine, + self, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, ): server = fakes.generate_fake_resource(Server) server_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -1540,12 +1588,12 @@ def test_start_server_with_playbook( @patch.object(OpenStackConnector, "delete_keypair") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") 
def test_start_server_with_playbook_exception( - self, - mock_logger_exception, - mock_delete_keypair, - mock_create_userdata, - mock_get_volumes, - mock_get_security_groups_starting_machine, + self, + mock_logger_exception, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, ): server = fakes.generate_fake_resource(Server) server_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -1608,10 +1656,10 @@ def test_start_server_with_playbook_exception( ) @patch.object(OpenStackConnector, "get_or_create_project_security_group") def test_get_security_groups_starting_machine( - self, - mock_get_project_sg, - mock_get_research_env_sg, - mock_get_default_security_groups, + self, + mock_get_project_sg, + mock_get_research_env_sg, + mock_get_default_security_groups, ): # Set up mocks fake_default_security_group = fakes.generate_fake_resource( @@ -1691,17 +1739,31 @@ def test_get_volumes_machines_start(self): # Check the result expected_result = [fake_vol_1, fake_vol_2] self.assertEqual(result, expected_result) + volume_ids_path_new = [] + volume_ids_path_attach = [] + + # Call the method + result = self.openstack_connector._get_volumes_machines_start( + volume_ids_path_new, volume_ids_path_attach + ) + + # Assertions + self.openstack_connector.openstack_connection.get_volume.assert_has_calls([]) + + # Check the result + expected_result = [] + self.assertEqual(result, expected_result) @patch.object(OpenStackConnector, "_get_security_groups_starting_machine") @patch.object(OpenStackConnector, "_get_volumes_machines_start") @patch.object(OpenStackConnector, "create_userdata") @patch.object(OpenStackConnector, "delete_keypair") def test_start_server( - self, - mock_delete_keypair, - mock_create_userdata, - mock_get_volumes, - mock_get_security_groups_starting_machine, + self, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, ): server = 
fakes.generate_fake_resource(Server) server_keypair = fakes.generate_fake_resource(keypair.Keypair) @@ -1728,8 +1790,12 @@ def test_start_server( self.openstack_connector.openstack_connection.network.find_network.return_value = ( fake_network ) - self.openstack_connector.openstack_connection.compute.find_keypair.return_value = server_keypair - self.openstack_connector.openstack_connection.compute.get_keypair.return_value = server_keypair + self.openstack_connector.openstack_connection.compute.find_keypair.return_value = ( + server_keypair + ) + self.openstack_connector.openstack_connection.compute.get_keypair.return_value = ( + server_keypair + ) mock_get_volumes.return_value = ["volume1", "volume2"] mock_create_userdata.return_value = "userdata" @@ -1760,7 +1826,7 @@ def test_start_server( volume_ids_path_attach=volume_ids_path_attach, additional_keys=additional_keys, additional_security_group_ids=additional_security_group_ids, - public_key=public_key + public_key=public_key, ) # Assertions @@ -1798,7 +1864,9 @@ def test_start_server( volume_ids_path_attach=volume_ids_path_attach, ) - self.openstack_connector.openstack_connection.get_keypair.assert_called_once_with(name_or_id=server_keypair.name) + self.openstack_connector.openstack_connection.get_keypair.assert_called_once_with( + name_or_id=server_keypair.name + ) mock_delete_keypair.assert_any_call(key_name=server_keypair.name) # Check the result @@ -1810,12 +1878,12 @@ def test_start_server( @patch.object(OpenStackConnector, "delete_keypair") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") def test_start_server_exception( - self, - mock_logger_exception, - mock_delete_keypair, - mock_create_userdata, - mock_get_volumes, - mock_get_security_groups_starting_machine, + self, + mock_logger_exception, + mock_delete_keypair, + mock_create_userdata, + mock_get_volumes, + mock_get_security_groups_starting_machine, ): server = fakes.generate_fake_resource(Server) server_keypair = 
fakes.generate_fake_resource(keypair.Keypair) @@ -1867,17 +1935,14 @@ def test_start_server_exception( volume_ids_path_attach=volume_ids_path_attach, additional_keys=additional_keys, additional_security_group_ids=additional_security_group_ids, - public_key=public_key + public_key=public_key, ) - mock_logger_exception.assert_any_call( - (f"Start Server {servername} error") - ) + mock_logger_exception.assert_any_call((f"Start Server {servername} error")) @patch.object(OpenStackConnector, "create_add_keys_script") @patch.object(OpenStackConnector, "create_mount_init_script") def test_create_userdata( - self, mock_create_mount_init_script, - mock_create_add_keys_script + self, mock_create_mount_init_script, mock_create_add_keys_script ): # Set up mocks mock_create_add_keys_script.return_value = b"mock_add_keys_script" @@ -1901,24 +1966,24 @@ def test_create_userdata( # Check the result expected_result = ( - b"mock_add_keys_script\n" - + b"#!/bin/bash\npasswd -u ubuntu\n" - + b"\nmock_mount_script" + b"mock_add_keys_script\n" + + b"#!/bin/bash\npasswd -u ubuntu\n" + + b"\nmock_mount_script" ) self.assertEqual(result, expected_result) @patch.object(OpenStackConnector, "get_server") @patch("simple_vm_client.openstack_connector.openstack_connector.sympy.symbols") @patch("simple_vm_client.openstack_connector.openstack_connector.sympy.sympify") - def test_get_vm_ports( - self, mock_sympify, mock_symbols, mock_get_server - ): + def test_get_vm_ports(self, mock_sympify, mock_symbols, mock_get_server): # Set up mocks mock_server = fakes.generate_fake_resource(server.Server) mock_server["private_v4"] = "192.168.1.2" mock_get_server.return_value = mock_server - mock_sympify.return_value.evalf.return_value = 30258 # Replace with expected values + mock_sympify.return_value.evalf.return_value = ( + 30258 # Replace with expected values + ) mock_symbols.side_effect = ["x", "y"] # Call the method @@ -1933,13 +1998,18 @@ def test_get_vm_ports( 
mock_sympify.return_value.evalf.assert_called_with(subs={"x": 2, "y": 1}) # Check the result - expected_result = {"port": "30258", "udp": "30258"} # Replace with expected values + expected_result = { + "port": "30258", + "udp": "30258", + } # Replace with expected values self.assertEqual(result, expected_result) @patch.object(OpenStackConnector, "get_server") @patch.object(OpenStackConnector, "_validate_server_for_deletion") @patch.object(OpenStackConnector, "_remove_security_groups_from_server") - def test_delete_server_successful(self, mock_remove_security_groups, mock_validate_server, mock_get_server): + def test_delete_server_successful( + self, mock_remove_security_groups, mock_validate_server, mock_get_server + ): # Arrange mock_server = fakes.generate_fake_resource(server.Server) @@ -1951,7 +2021,9 @@ def test_delete_server_successful(self, mock_remove_security_groups, mock_valida mock_get_server.assert_called_once_with(openstack_id=mock_server.id) mock_validate_server.assert_called_once_with(server=mock_server) mock_remove_security_groups.assert_called_once_with(server=mock_server) - self.openstack_connector.openstack_connection.compute.delete_server.assert_called_once_with(mock_server.id, force=True) + self.openstack_connector.openstack_connection.compute.delete_server.assert_called_once_with( + mock_server.id, force=True + ) @patch.object(OpenStackConnector, "get_server") def test_delete_server_exception(self, mock_get_server): @@ -2015,13 +2087,19 @@ def test_remove_security_groups_from_server_no_security_groups(self): self.assertTrue(True) @patch.object(OpenStackConnector, "is_security_group_in_use") - def test_remove_security_groups_from_server_with_security_groups(self, mock_is_security_group_in_use): + def test_remove_security_groups_from_server_with_security_groups( + self, mock_is_security_group_in_use + ): # Arrange server_mock = fakes.generate_fake_resource(server.Server) - fake_groups = 
list(fakes.generate_fake_resources(security_group.SecurityGroup, count=4)) + fake_groups = list( + fakes.generate_fake_resources(security_group.SecurityGroup, count=4) + ) fake_groups[2].name = "bibigrid-sec" server_mock.security_groups = fake_groups - self.openstack_connector.openstack_connection.get_security_group.side_effect = fake_groups + self.openstack_connector.openstack_connection.get_security_group.side_effect = ( + fake_groups + ) mock_is_security_group_in_use.side_effect = [False, False, True, True] # Act @@ -2033,11 +2111,17 @@ def test_remove_security_groups_from_server_with_security_groups(self, mock_is_s ) with self.assertRaises(AssertionError): - self.openstack_connector.openstack_connection.delete_security_group.assert_any_call(fake_groups[2]) - self.openstack_connector.openstack_connection.delete_security_group.assert_any_call(fake_groups[3]) + self.openstack_connector.openstack_connection.delete_security_group.assert_any_call( + fake_groups[2] + ) + self.openstack_connector.openstack_connection.delete_security_group.assert_any_call( + fake_groups[3] + ) for group in fake_groups[:2]: - self.openstack_connector.openstack_connection.delete_security_group.assert_any_call(group) + self.openstack_connector.openstack_connection.delete_security_group.assert_any_call( + group + ) @patch.object(OpenStackConnector, "get_server") def test_stop_server_success(self, mock_get_server): @@ -2055,15 +2139,21 @@ def test_stop_server_success(self, mock_get_server): @patch.object(OpenStackConnector, "get_server") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") - def test_stop_server_conflict_exception(self, mock_logger_exception, mock_get_server): + def test_stop_server_conflict_exception( + self, mock_logger_exception, mock_get_server + ): # Arrange server_mock = fakes.generate_fake_resource(server.Server) mock_get_server.return_value = server_mock - self.openstack_connector.openstack_connection.compute.stop_server.side_effect = 
ConflictException("Unit Test") + self.openstack_connector.openstack_connection.compute.stop_server.side_effect = ConflictException( + "Unit Test" + ) # Act with self.assertRaises(OpenStackConflictException): self.openstack_connector.stop_server(openstack_id="some_openstack_id") - mock_logger_exception.assert_called_once_with(f"Stop Server some_openstack_id failed!") + mock_logger_exception.assert_called_once_with( + "Stop Server some_openstack_id failed!" + ) @patch.object(OpenStackConnector, "get_server") def test_reboot_server_success(self, mock_get_server): @@ -2081,13 +2171,18 @@ def test_reboot_server_success(self, mock_get_server): @patch.object(OpenStackConnector, "get_server") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") - def test_reboot_server_conflict_exception(self, mock_logger_exception, mock_get_server): - - self.openstack_connector.openstack_connection.compute.reboot_server.side_effect = ConflictException("Unit Test") + def test_reboot_server_conflict_exception( + self, mock_logger_exception, mock_get_server + ): + self.openstack_connector.openstack_connection.compute.reboot_server.side_effect = ConflictException( + "Unit Test" + ) # Act with self.assertRaises(OpenStackConflictException): self.openstack_connector.reboot_server("some_openstack_id", "SOFT") - mock_logger_exception.assert_called_once_with(f"Reboot Server some_openstack_id failed!") + mock_logger_exception.assert_called_once_with( + "Reboot Server some_openstack_id failed!" 
+ ) @patch.object(OpenStackConnector, "get_server") def test_reboot_soft_server(self, mock_get_server): @@ -2125,20 +2220,26 @@ def test_resume_server_success(self, mock_get_server): @patch.object(OpenStackConnector, "get_server") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") - def test_resume_server_conflict_exception(self, mock_logger_exception, mock_get_server): - - self.openstack_connector.openstack_connection.compute.start_server.side_effect = ConflictException("Unit Test") + def test_resume_server_conflict_exception( + self, mock_logger_exception, mock_get_server + ): + self.openstack_connector.openstack_connection.compute.start_server.side_effect = ConflictException( + "Unit Test" + ) # Act with self.assertRaises(OpenStackConflictException): self.openstack_connector.resume_server("some_openstack_id") - mock_logger_exception.assert_called_once_with(f"Resume Server some_openstack_id failed!") + mock_logger_exception.assert_called_once_with( + "Resume Server some_openstack_id failed!" 
+ ) @patch.object(OpenStackConnector, "_calculate_vm_ports") @patch.object(OpenStackConnector, "get_image") @patch.object(OpenStackConnector, "get_flavor") @patch.object(OpenStackConnector, "netcat") - def test_get_server(self, mock_netcat, - mock_get_flavor, mock_get_image, mock_calculate_ports): + def test_get_server( + self, mock_netcat, mock_get_flavor, mock_get_image, mock_calculate_ports + ): # Arrange openstack_id = "your_openstack_id" server_mock = fakes.generate_fake_resource(server.Server) @@ -2149,7 +2250,9 @@ def test_get_server(self, mock_netcat, server_mock.flavor = flavor_mock # Mocking the methods and attributes - self.openstack_connector.openstack_connection.get_server_by_id.return_value = server_mock + self.openstack_connector.openstack_connection.get_server_by_id.return_value = ( + server_mock + ) mock_get_image.return_value = image_mock mock_get_flavor.return_value = flavor_mock mock_calculate_ports.return_value = (30111, 30111) @@ -2159,9 +2262,13 @@ def test_get_server(self, mock_netcat, self.openstack_connector.get_server(openstack_id) # Assert - self.openstack_connector.openstack_connection.get_server_by_id.assert_called_once_with(id=openstack_id) + self.openstack_connector.openstack_connection.get_server_by_id.assert_called_once_with( + id=openstack_id + ) mock_calculate_ports.assert_called_once_with(server=server_mock) - mock_netcat.assert_called_once_with(host=self.openstack_connector.GATEWAY_IP, port=30111) + mock_netcat.assert_called_once_with( + host=self.openstack_connector.GATEWAY_IP, port=30111 + ) mock_get_image.assert_called_once_with( name_or_id=image_mock.id, ignore_not_active=True, @@ -2171,16 +2278,22 @@ def test_get_server(self, mock_netcat, mock_netcat.return_value = False # Assuming SSH connection is successful # Act result_server = self.openstack_connector.get_server(openstack_id) - self.assertEqual(result_server.task_state, VmTaskStates.CHECKING_SSH_CONNECTION.value) + self.assertEqual( + result_server.task_state, 
VmTaskStates.CHECKING_SSH_CONNECTION.value + ) def test_get_server_not_found(self): - self.openstack_connector.openstack_connection.get_server_by_id.return_value = None + self.openstack_connector.openstack_connection.get_server_by_id.return_value = ( + None + ) with self.assertRaises(ServerNotFoundException): self.openstack_connector.get_server("someid") def test_get_server_openstack_exception(self): - self.openstack_connector.openstack_connection.get_server_by_id.side_effect = OpenStackCloudException("UNit Test") + self.openstack_connector.openstack_connection.get_server_by_id.side_effect = ( + OpenStackCloudException("UNit Test") + ) with self.assertRaises(DefaultException): self.openstack_connector.get_server("someid") @@ -2206,25 +2319,34 @@ def test_set_server_metadata_exception(self, mock_get_server): server_mock = fakes.generate_fake_resource(server.Server) mock_get_server.return_value = server_mock metadata = {"data": "123"} - self.openstack_connector.openstack_connection.compute.set_server_metadata.side_effect = OpenStackCloudException("Unit Tests") + self.openstack_connector.openstack_connection.compute.set_server_metadata.side_effect = OpenStackCloudException( + "Unit Tests" + ) # Act with self.assertRaises(DefaultException): self.openstack_connector.set_server_metadata(server_mock.id, metadata) @patch.object(OpenStackConnector, "get_server") @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") - def test_reboot_server_conflict_exception(self, mock_logger_exception, mock_get_server): - - self.openstack_connector.openstack_connection.compute.reboot_server.side_effect = ConflictException("Unit Test") + def test_reboot_server_conflict_exception( + self, mock_logger_exception, mock_get_server + ): + self.openstack_connector.openstack_connection.compute.reboot_server.side_effect = ConflictException( + "Unit Test" + ) # Act with self.assertRaises(OpenStackConflictException): 
self.openstack_connector.reboot_server("some_openstack_id", "SOFT") - mock_logger_exception.assert_called_once_with(f"Reboot Server some_openstack_id failed!") + mock_logger_exception.assert_called_once_with( + f"Reboot Server some_openstack_id failed!" + ) def test_exist_server_true(self): server_mock = fakes.generate_fake_resource(server.Server) - self.openstack_connector.openstack_connection.compute.find_server.return_value = server_mock + self.openstack_connector.openstack_connection.compute.find_server.return_value = ( + server_mock + ) result = self.openstack_connector.exist_server(server_mock.name) self.assertTrue(result) @@ -2232,19 +2354,27 @@ def test_exist_server_true(self): def test_exist_server_false(self): server_mock = fakes.generate_fake_resource(server.Server) - self.openstack_connector.openstack_connection.compute.find_server.return_value = None + self.openstack_connector.openstack_connection.compute.find_server.return_value = ( + None + ) result = self.openstack_connector.exist_server(server_mock.name) self.assertFalse(result) def test_get_or_create_project_security_group_exists(self): # Mock the get_security_group method to simulate an existing security group - existing_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + existing_security_group = fakes.generate_fake_resource( + security_group.SecurityGroup + ) - self.openstack_connector.openstack_connection.get_security_group.return_value = existing_security_group + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + existing_security_group + ) # Call the method - result = self.openstack_connector.get_or_create_project_security_group("project_name", "project_id") + result = self.openstack_connector.get_or_create_project_security_group( + "project_name", "project_id" + ) # Assertions self.assertEqual(result, existing_security_group.id) @@ -2252,14 +2382,20 @@ def test_get_or_create_project_security_group_exists(self): def 
test_get_or_create_project_security_group_create_new(self): # Mock the get_security_group method to simulate a non-existing security group - self.openstack_connector.openstack_connection.get_security_group.return_value = None + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + None + ) # Mock the create_security_group method to simulate creating a new security group new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) - self.openstack_connector.openstack_connection.create_security_group.return_value = new_security_group + self.openstack_connector.openstack_connection.create_security_group.return_value = ( + new_security_group + ) # Call the method - result = self.openstack_connector.get_or_create_project_security_group("project_name", "project_id") + result = self.openstack_connector.get_or_create_project_security_group( + "project_name", "project_id" + ) # Assertions self.assertEqual(result, new_security_group.id) @@ -2267,9 +2403,13 @@ def test_get_or_create_project_security_group_create_new(self): def test_get_or_create_vm_security_group_exist(self): # Mock the get_security_group method to simulate an existing security group - existing_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + existing_security_group = fakes.generate_fake_resource( + security_group.SecurityGroup + ) - self.openstack_connector.openstack_connection.get_security_group.return_value = existing_security_group + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + existing_security_group + ) # Call the method result = self.openstack_connector.get_or_create_vm_security_group("server_id") @@ -2280,14 +2420,20 @@ def test_get_or_create_vm_security_group_exist(self): def test_get_or_create_vm_security_group_create_new(self): # Mock the get_security_group method to simulate a non-existing security group - self.openstack_connector.openstack_connection.get_security_group.return_value 
= None + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + None + ) # Mock the create_security_group method to simulate creating a new security group new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) - self.openstack_connector.openstack_connection.create_security_group.return_value = new_security_group + self.openstack_connector.openstack_connection.create_security_group.return_value = ( + new_security_group + ) # Call the method - result = self.openstack_connector.get_or_create_vm_security_group("openstack_id") + result = self.openstack_connector.get_or_create_vm_security_group( + "openstack_id" + ) # Assertions self.assertEqual(result, new_security_group.id) @@ -2295,12 +2441,20 @@ def test_get_or_create_vm_security_group_create_new(self): def test_get_or_create_research_environment_security_group_exist(self): # Mock the get_security_group method to simulate an existing security group - existing_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + existing_security_group = fakes.generate_fake_resource( + security_group.SecurityGroup + ) - self.openstack_connector.openstack_connection.get_security_group.return_value = existing_security_group + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + existing_security_group + ) # Call the method - result = self.openstack_connector.get_or_create_research_environment_security_group(resenv_metadata=METADATA_EXAMPLE) + result = ( + self.openstack_connector.get_or_create_research_environment_security_group( + resenv_metadata=METADATA_EXAMPLE + ) + ) # Assertions self.assertEqual(result, existing_security_group.id) @@ -2308,19 +2462,29 @@ def test_get_or_create_research_environment_security_group_exist(self): def test_get_or_create_research_environment_no_forc_support(self): # Mock the get_security_group method to simulate a non-existing security group - 
self.openstack_connector.get_or_create_research_environment_security_group(resenv_metadata=METADATA_EXAMPLE_NO_FORC) + self.openstack_connector.get_or_create_research_environment_security_group( + resenv_metadata=METADATA_EXAMPLE_NO_FORC + ) self.openstack_connector.openstack_connection.get_security_group.assert_not_called() def test_get_or_create_research_environment_security_group_new(self): # Mock the get_security_group method to simulate a non-existing security group - self.openstack_connector.openstack_connection.get_security_group.return_value = None + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + None + ) # Mock the create_security_group method to simulate creating a new security group new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) - self.openstack_connector.openstack_connection.create_security_group.return_value = new_security_group + self.openstack_connector.openstack_connection.create_security_group.return_value = ( + new_security_group + ) # Call the method - result = self.openstack_connector.get_or_create_research_environment_security_group(resenv_metadata=METADATA_EXAMPLE) + result = ( + self.openstack_connector.get_or_create_research_environment_security_group( + resenv_metadata=METADATA_EXAMPLE + ) + ) # Assertions self.assertEqual(result, new_security_group.id) @@ -2329,7 +2493,9 @@ def test_get_or_create_research_environment_security_group_new(self): def test_is_security_group_in_use_instances(self): # Mock the compute.servers method to simulate instances using the security group instances = [{"id": "instance_id", "name": "instance_name"}] - self.openstack_connector.openstack_connection.compute.servers = MagicMock(return_value=instances) + self.openstack_connector.openstack_connection.compute.servers = MagicMock( + return_value=instances + ) # Call the method result = self.openstack_connector.is_security_group_in_use("security_group_id") @@ -2340,7 +2506,8 @@ def 
test_is_security_group_in_use_instances(self): def test_is_security_group_in_use_ports(self): # Mock the network.ports method to simulate ports associated with the security group ports = [{"id": "port_id", "name": "port_name"}] - self.openstack_connector.openstack_connection.network.ports = MagicMock(return_value=ports) + self.openstack_connector.openstack_connection.compute.servers.return_value = [] + self.openstack_connector.openstack_connection.network.ports.return_value = ports # Call the method result = self.openstack_connector.is_security_group_in_use("security_group_id") @@ -2351,7 +2518,13 @@ def test_is_security_group_in_use_ports(self): def test_is_security_group_in_use_load_balancers(self): # Mock the network.load_balancers method to simulate load balancers associated with the security group load_balancers = [{"id": "lb_id", "name": "lb_name"}] - self.openstack_connector.openstack_connection.network.load_balancers = MagicMock(return_value=load_balancers) + self.openstack_connector.openstack_connection.compute.servers.return_value = [] + self.openstack_connector.openstack_connection.network.ports.return_value = [] + + self.openstack_connector.openstack_connection.network.load_balancers.return_value = [ + 1, + 2, + ] # Call the method result = self.openstack_connector.is_security_group_in_use("security_group_id") @@ -2361,10 +2534,15 @@ def test_is_security_group_in_use_load_balancers(self): def test_is_security_group_not_in_use(self): # Mock both compute.servers and network.ports methods to simulate no usage of the security group - self.openstack_connector.openstack_connection.compute.servers = MagicMock(return_value=[]) - self.openstack_connector.openstack_connection.network.ports = MagicMock(return_value=[]) - self.openstack_connector.openstack_connection.network.load_balancers = MagicMock(return_value=[]) - + self.openstack_connector.openstack_connection.compute.servers = MagicMock( + return_value=[] + ) + 
self.openstack_connector.openstack_connection.network.ports = MagicMock( + return_value=[] + ) + self.openstack_connector.openstack_connection.network.load_balancers = ( + MagicMock(return_value=[]) + ) # Call the method result = self.openstack_connector.is_security_group_in_use("security_group_id") @@ -2372,14 +2550,33 @@ def test_is_security_group_not_in_use(self): # Assertions self.assertFalse(result) + def test_create_security_group_exist(self): + fake_sg = fakes.generate_fake_resource(security_group.SecurityGroup) + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + fake_sg + ) + # Call the method + result = self.openstack_connector.create_security_group( + name=fake_sg.name, + udp_port=1234, + ssh=True, + udp=True, + description=fake_sg.description, + research_environment_metadata=METADATA_EXAMPLE, + ) + self.assertEqual(result, fake_sg) def test_create_security_group(self): # Mock the get_security_group method to simulate non-existing security group - self.openstack_connector.openstack_connection.get_security_group.return_value = None + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + None + ) # Mock the create_security_group method to return a fake SecurityGroup fake_sg = fakes.generate_fake_resource(security_group.SecurityGroup) - self.openstack_connector.openstack_connection.create_security_group.return_value =fake_sg + self.openstack_connector.openstack_connection.create_security_group.return_value = ( + fake_sg + ) # Call the method result = self.openstack_connector.create_security_group( @@ -2393,7 +2590,9 @@ def test_create_security_group(self): # Assertions self.assertEqual(result, fake_sg) - self.openstack_connector.openstack_connection.create_security_group.assert_called_once_with(name=fake_sg.name, description=fake_sg.description) + self.openstack_connector.openstack_connection.create_security_group.assert_called_once_with( + name=fake_sg.name, description=fake_sg.description + ) 
self.openstack_connector.openstack_connection.create_security_group_rule.assert_any_call( direction="ingress", protocol="udp", @@ -2419,7 +2618,7 @@ def test_create_security_group(self): port_range_min=22, secgroup_name_or_id=fake_sg.id, remote_group_id=self.openstack_connector.GATEWAY_SECURITY_GROUP_ID, - ) + ) self.openstack_connector.openstack_connection.create_security_group_rule.assert_any_call( direction="ingress", protocol="tcp", @@ -2430,5 +2629,324 @@ def test_create_security_group(self): remote_group_id=self.openstack_connector.GATEWAY_SECURITY_GROUP_ID, ) + def test_open_port_range_for_vm_in_project_exception(self): + with self.assertRaises(DefaultException): + self.openstack_connector.open_port_range_for_vm_in_project( + range_start=1000, + range_stop=1000, + openstack_id="wewqeq", + ethertype="IPV7", + protocol="TCP", + ) + + @patch.object(OpenStackConnector, "get_or_create_project_security_group") + @patch.object(OpenStackConnector, "get_or_create_vm_security_group") + @patch.object(OpenStackConnector, "get_server") + @patch("simple_vm_client.openstack_connector.openstack_connector.logger.exception") + def test_open_port_range_for_vm_in_project_conflict_exception( + self, mock_logger_exception, mock_get_server, mock_get_vm_sg, mock_get_proj_sg + ): + fake_server = fakes.generate_fake_resource(server.Server) + + fake_project_sg = fakes.generate_fake_resource(security_group.SecurityGroup) + fake_vm_sg = fakes.generate_fake_resource(security_group.SecurityGroup) + fake_sg_rule = fakes.generate_fake_resource( + security_group_rule.SecurityGroupRule + ) + fake_server.security_groups = [fake_vm_sg, fake_project_sg] + fake_server.metadata = { + "project_name": "fake_project", + "project_id": "fake_project_id", + } + mock_get_server.return_value = fake_server + self.openstack_connector.openstack_connection.create_security_group_rule.return_value = ( + fake_sg_rule + ) + + self.openstack_connector.openstack_connection.create_security_group_rule.side_effect = 
ConflictException( + "Unit Test" + ) + with self.assertRaises(OpenStackConflictException): + self.openstack_connector.open_port_range_for_vm_in_project( + range_start=1000, range_stop=1000, openstack_id="test" + ) + mock_logger_exception.assert_called_once_with( + f"Could not create security group rule for instance test" + ) + + @patch.object(OpenStackConnector, "get_or_create_project_security_group") + @patch.object(OpenStackConnector, "get_or_create_vm_security_group") + @patch.object(OpenStackConnector, "get_server") + def test_open_port_range_for_vm_in_project( + self, + mock_get_server, + mock_get_vm_sg, + mock_get_proj_sg, + ): + # Mock the get_server_by_id method to return a fake Server + fake_server = fakes.generate_fake_resource(server.Server) + + fake_project_sg = fakes.generate_fake_resource(security_group.SecurityGroup) + fake_vm_sg = fakes.generate_fake_resource(security_group.SecurityGroup) + fake_sg_rule = fakes.generate_fake_resource( + security_group_rule.SecurityGroupRule + ) + fake_server.security_groups = [fake_vm_sg, fake_project_sg] + fake_server.metadata = { + "project_name": "fake_project", + "project_id": "fake_project_id", + } + mock_get_server.return_value = fake_server + + # Mock the get_or_create_project_security_group method to return a fake security group ID + mock_get_proj_sg.return_value = fake_project_sg.id + + # Mock the get_or_create_vm_security_group method to return a fake security group ID + mock_get_vm_sg.return_value = fake_vm_sg.id + + # Mock the create_security_group_rule method to return a fake security group rule + self.openstack_connector.openstack_connection.create_security_group_rule.return_value = ( + fake_sg_rule + ) + + # Call the method + result = self.openstack_connector.open_port_range_for_vm_in_project( + range_start=1000, + range_stop=2000, + openstack_id=fake_server.id, + ethertype="IPv4", + protocol="TCP", + ) + + # Assertions + self.assertEqual(fake_sg_rule, fake_sg_rule) + 
mock_get_server.assert_called_once_with(openstack_id=fake_server.id) + mock_get_proj_sg.assert_called_once_with( + project_name="fake_project", project_id="fake_project_id" + ) + mock_get_vm_sg.assert_called_once_with(openstack_id=fake_server.id) + self.openstack_connector.openstack_connection.add_server_security_groups.assert_called_once_with( + server=fake_server, security_groups=[fake_vm_sg.id] + ) + + def test_create_or_get_default_ssh_security_group_exists(self): + # Mock the get_security_group method to simulate an existing security group + existing_security_group = fakes.generate_fake_resource( + security_group.SecurityGroup + ) + + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + existing_security_group + ) + + # Call the method + result = self.openstack_connector.create_or_get_default_ssh_security_group() + + # Assertions + self.assertEqual(result.id, existing_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_not_called() + + def test_create_or_get_default_ssh_security_group_create_new(self): + # Mock the get_security_group method to simulate a non-existing security group + self.openstack_connector.openstack_connection.get_security_group.return_value = ( + None + ) + + # Mock the create_security_group method to simulate creating a new security group + new_security_group = fakes.generate_fake_resource(security_group.SecurityGroup) + self.openstack_connector.openstack_connection.create_security_group.return_value = ( + new_security_group + ) + + # Call the method + result = self.openstack_connector.create_or_get_default_ssh_security_group() + + # Assertions + self.assertEqual(result.id, new_security_group.id) + self.openstack_connector.openstack_connection.create_security_group.assert_called_once() + + @patch("simple_vm_client.openstack_connector.openstack_connector.os.path.dirname") + @patch("simple_vm_client.openstack_connector.openstack_connector.os.path.abspath") + 
@patch("simple_vm_client.openstack_connector.openstack_connector.os.path.join") + @patch( + "simple_vm_client.openstack_connector.openstack_connector.open", + new_callable=unittest.mock.mock_open, + read_data="mock_script_content", + ) + def test_create_mount_init_script( + self, mock_open, mock_join, mock_abspath, mock_dirname + ): + # Mock the relevant parts for os.path operations + mock_abspath.return_value = "/mock/absolute/path" + mock_dirname.return_value = "/mock/directory" + mock_join.return_value = "/mock/join/path" + + # Call the method with sample volume data + result = self.openstack_connector.create_mount_init_script( + new_volumes=[{"openstack_id": "vol_id_1", "path": "/path_1"}], + attach_volumes=[{"openstack_id": "vol_id_2", "path": "/path_2"}], + ) + + # Assertions + self.assertEqual(result, b"mock_script_content") + mock_open.assert_called_once_with("/mock/join/path", "r") + mock_open().read.assert_called_once() + + def test_create_mount_init_script_no_volumes(self): + result = self.openstack_connector.create_mount_init_script( + new_volumes=None, attach_volumes=None + ) + self.assertEqual(result, "") + + def test_create_mount_init_script_no_new_volumes(self): + result = self.openstack_connector.create_mount_init_script( + new_volumes=None, + attach_volumes=[{"openstack_id": "vol_id_1", "path": "/path_1"}], + ) + self.assertIn(b"vol_id_1", result) + + def test_create_mount_init_script_no_attach_volumes(self): + result = self.openstack_connector.create_mount_init_script( + new_volumes=[{"openstack_id": "vol_id_2", "path": "/path_2"}], + attach_volumes=None, + ) + self.assertIn(b"vol_id_2", result) + + def test_delete_security_group_rule_sucess(self): + self.openstack_connector.openstack_connection.delete_security_group_rule.return_value = ( + True + ) + self.openstack_connector.delete_security_group_rule("rule_id") + self.openstack_connector.openstack_connection.delete_security_group_rule.assert_called_once_with( + rule_id="rule_id" + ) + + def 
test_delete_security_group_rule_failure(self): + self.openstack_connector.openstack_connection.delete_security_group_rule.return_value = ( + False + ) + with self.assertRaises(DefaultException): + self.openstack_connector.delete_security_group_rule("rule_id") + self.openstack_connector.openstack_connection.delete_security_group_rule.assert_called_once_with( + rule_id="rule_id" + ) + + def test_get_gateway_ip(self): + result = self.openstack_connector.get_gateway_ip() + self.assertEqual(result, {"gateway_ip": self.openstack_connector.GATEWAY_IP}) + + def test_get_calculation_values(self): + result = self.openstack_connector.get_calculation_values() + self.assertEqual( + result, + { + "SSH_PORT_CALCULATION": self.openstack_connector.SSH_PORT_CALCULATION, + "UDP_PORT_CALCULATION": self.openstack_connector.UDP_PORT_CALCULATION, + }, + ) + + def test_create_volume_by_volume_snap_exception(self): + fake_source_volume = fakes.generate_fake_resource(volume.Volume) + fake_result_volume = fakes.generate_fake_resource(volume.Volume) + + self.openstack_connector.openstack_connection.block_storage.create_volume.side_effect = ResourceFailure( + "UNit Test" + ) + with self.assertRaises(ResourceNotAvailableException): + self.openstack_connector.create_volume_by_volume_snap( + volume_name=fake_result_volume.name, + metadata={"data": "data"}, + volume_snap_id=fake_source_volume.id, + ) + + def test_create_volume_by_volume_snap(self): + fake_source_volume = fakes.generate_fake_resource(volume.Volume) + fake_result_volume = fakes.generate_fake_resource(volume.Volume) + + self.openstack_connector.openstack_connection.block_storage.create_volume.return_value = ( + fake_result_volume + ) + result = self.openstack_connector.create_volume_by_volume_snap( + volume_name=fake_result_volume.name, + metadata={"data": "data"}, + volume_snap_id=fake_source_volume.id, + ) + self.assertEqual(result, fake_result_volume) + + def test_create_volume_by_source_volume(self): + fake_source_volume = 
fakes.generate_fake_resource(volume.Volume) + fake_result_volume = fakes.generate_fake_resource(volume.Volume) + self.openstack_connector.openstack_connection.block_storage.create_volume.return_value = ( + fake_result_volume + ) + result = self.openstack_connector.create_volume_by_source_volume( + volume_name=fake_result_volume.name, + metadata={"data": "data"}, + source_volume_id=fake_source_volume.id, + ) + self.assertEqual(result, fake_result_volume) + + def test_create_create_volume_by_source_volume_exception(self): + fake_source_volume = fakes.generate_fake_resource(volume.Volume) + fake_result_volume = fakes.generate_fake_resource(volume.Volume) + + self.openstack_connector.openstack_connection.block_storage.create_volume.side_effect = ResourceFailure( + "UNit Test" + ) + with self.assertRaises(ResourceNotAvailableException): + self.openstack_connector.create_volume_by_source_volume( + volume_name=fake_result_volume.name, + metadata={"data": "data"}, + source_volume_id=fake_source_volume.id, + ) + + @patch( + "simple_vm_client.openstack_connector.openstack_connector.connection.Connection" + ) + def test___init__(self, mock_authorize): + # Mock the relevant parts for file operations and connection + + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + # Call the __init__ method + OpenStackConnector(config_file=temp_file.name) + os.remove(temp_file.name) + # Assertions + + mock_authorize.assert_called_once() + + def test___init__failed_auth(self): + # Mock the relevant parts for file operations and connection + + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + # Call the __init__ method + with self.assertRaises(Exception): + openstack_connector = OpenStackConnector(config_file=temp_file.name) + os.remove(temp_file.name) + # Assertions + + @patch.dict( + os.environ, + { + "USE_APPLICATION_CREDENTIALS": "True", + "OS_APPLICATION_CREDENTIAL_ID": "APP_ID", 
+ "OS_APPLICATION_CREDENTIAL_SECRET": "APP_SECRET", + }, + ) + @patch( + "simple_vm_client.openstack_connector.openstack_connector.connection.Connection" + ) + def test___init__os_creds(self, mock_authorize): + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + # Call the __init__ method + OpenStackConnector(config_file=temp_file.name) + os.remove(temp_file.name) + # Assertions + + mock_authorize.assert_called_once() + + if __name__ == "__main__": unittest.main() From 64ea82bd65b9a529db7d288b4e549cb15fd486e9 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 4 Jan 2024 09:56:17 +0100 Subject: [PATCH 33/39] added pytest.ini and .coveragerc --- .coveragerc | 14 ++++++++++++++ .github/workflows/coverage.yml | 2 +- pytest.ini | 6 ++++++ simple_vm_client/test_openstack_connector.py | 6 +++--- 4 files changed, 24 insertions(+), 4 deletions(-) create mode 100644 .coveragerc create mode 100644 pytest.ini diff --git a/.coveragerc b/.coveragerc new file mode 100644 index 0000000..02e7226 --- /dev/null +++ b/.coveragerc @@ -0,0 +1,14 @@ +[run] +omit = + simple_vm_client/VirtualMachineService.py + simple_vm_client/constants.py + simple_vm_client/test_openstack_connector.py + simple_vm_client/ttypes.py + simple_vm_client/forc_connector/template/test_templates.py + simple_vm_client/util/logger.py + +[report] +exclude_lines = + pragma: no cover + raise NotImplementedError + if __name__ == .__main__.: diff --git a/.github/workflows/coverage.yml b/.github/workflows/coverage.yml index bdea76c..949ab55 100644 --- a/.github/workflows/coverage.yml +++ b/.github/workflows/coverage.yml @@ -24,7 +24,7 @@ jobs: - name: Build coverage file run: | - pytest --junitxml=pytest.xml --cov-report=term-missing:skip-covered --cov=simple_vm_client/openstack_connector --cov=simple_vm_client/bibigrid_connector --cov=simple_vm_client/util --cov=simple_vm_client/forc_connector | tee pytest-coverage.txt + pytest --junitxml=pytest.xml | tee 
pytest-coverage.txt - name: Pytest coverage comment uses: MishaKav/pytest-coverage-comment@main diff --git a/pytest.ini b/pytest.ini new file mode 100644 index 0000000..4d84681 --- /dev/null +++ b/pytest.ini @@ -0,0 +1,6 @@ +# pytest.ini + +[pytest] +addopts = --cov=. --cov-config=.coveragerc + +# Add other configuration options as needed diff --git a/simple_vm_client/test_openstack_connector.py b/simple_vm_client/test_openstack_connector.py index 00d3237..fd4edb4 100644 --- a/simple_vm_client/test_openstack_connector.py +++ b/simple_vm_client/test_openstack_connector.py @@ -2909,7 +2909,7 @@ def test___init__(self, mock_authorize): with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: temp_file.write(CONFIG_DATA) # Call the __init__ method - OpenStackConnector(config_file=temp_file.name) + OpenStackConnector(config_file=temp_file.name) os.remove(temp_file.name) # Assertions @@ -2922,7 +2922,7 @@ def test___init__failed_auth(self): temp_file.write(CONFIG_DATA) # Call the __init__ method with self.assertRaises(Exception): - openstack_connector = OpenStackConnector(config_file=temp_file.name) + OpenStackConnector(config_file=temp_file.name) os.remove(temp_file.name) # Assertions @@ -2941,7 +2941,7 @@ def test___init__os_creds(self, mock_authorize): with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: temp_file.write(CONFIG_DATA) # Call the __init__ method - OpenStackConnector(config_file=temp_file.name) + OpenStackConnector(config_file=temp_file.name) os.remove(temp_file.name) # Assertions From 6284cbe5c8e027c7a4c11d2981b278a39f1bf746 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 4 Jan 2024 13:25:04 +0100 Subject: [PATCH 34/39] more tests --- simple_vm_client/VirtualMachineHandler.py | 132 +++--- .../forc_connector/forc_connector.py | 38 +- .../forc_connector/test_forc_connector.py | 389 ++++++++++++++++++ .../test_virtualmachinehandler.py | 275 +++++++++++++ 4 files changed, 750 insertions(+), 84 deletions(-) create mode 
100644 simple_vm_client/forc_connector/test_forc_connector.py create mode 100644 simple_vm_client/test_virtualmachinehandler.py diff --git a/simple_vm_client/VirtualMachineHandler.py b/simple_vm_client/VirtualMachineHandler.py index 4920a92..9e25c37 100644 --- a/simple_vm_client/VirtualMachineHandler.py +++ b/simple_vm_client/VirtualMachineHandler.py @@ -144,10 +144,16 @@ def get_server(self, openstack_id: str) -> VM: return server def get_servers(self) -> list[VM]: + + servers = openstack_servers = self.openstack_connector.get_servers() + servers_full=[] + + for server in servers: + servers_full.append(self.forc_connector.get_playbook_status(server=server)) serv = thrift_converter.os_to_thrift_servers( - openstack_servers=self.openstack_connector.get_servers() + openstack_servers=servers ) - return serv + return servers_full def get_servers_by_ids(self, server_ids: list[str]) -> list[VM]: return thrift_converter.os_to_thrift_servers( @@ -173,12 +179,12 @@ def get_forc_url(self) -> str: return self.forc_connector.get_forc_access_url() def create_snapshot( - self, - openstack_id: str, - name: str, - username: str, - base_tags: list[str], - description: str, + self, + openstack_id: str, + name: str, + username: str, + base_tags: list[str], + description: str, ) -> str: return self.openstack_connector.create_snapshot( openstack_id=openstack_id, @@ -192,7 +198,7 @@ def delete_image(self, image_id: str) -> None: return self.openstack_connector.delete_image(image_id=image_id) def create_volume( - self, volume_name: str, volume_storage: int, metadata: dict[str, str] + self, volume_name: str, volume_storage: int, metadata: dict[str, str] ) -> Volume: return thrift_converter.os_to_thrift_volume( openstack_volume=self.openstack_connector.create_volume( @@ -203,7 +209,7 @@ def create_volume( ) def create_volume_by_source_volume( - self, volume_name: str, metadata: dict[str, str], source_volume_id: str + self, volume_name: str, metadata: dict[str, str], source_volume_id: str ) 
-> Volume: return thrift_converter.os_to_thrift_volume( openstack_volume=self.openstack_connector.create_volume_by_source_volume( @@ -214,7 +220,7 @@ def create_volume_by_source_volume( ) def create_volume_by_volume_snap( - self, volume_name: str, metadata: dict[str, str], volume_snap_id: str + self, volume_name: str, metadata: dict[str, str], volume_snap_id: str ) -> Volume: return thrift_converter.os_to_thrift_volume( openstack_volume=self.openstack_connector.create_volume_by_volume_snap( @@ -225,7 +231,7 @@ def create_volume_by_volume_snap( ) def create_volume_snapshot( - self, volume_id: str, name: str, description: str + self, volume_id: str, name: str, description: str ) -> str: return self.openstack_connector.create_volume_snapshot( volume_id=volume_id, name=name, description=description @@ -250,7 +256,7 @@ def delete_volume(self, volume_id: str) -> None: return self.openstack_connector.delete_volume(volume_id=volume_id) def attach_volume_to_server( - self, openstack_id: str, volume_id: str + self, openstack_id: str, volume_id: str ) -> dict[str, str]: return self.openstack_connector.attach_volume_to_server( openstack_id=openstack_id, volume_id=volume_id @@ -260,7 +266,7 @@ def get_limits(self) -> dict[str, str]: return self.openstack_connector.get_limits() def create_backend( - self, owner: str, user_key_url: str, template: str, upstream_url: str + self, owner: str, user_key_url: str, template: str, upstream_url: str ) -> Backend: return self.forc_connector.create_backend( owner=owner, @@ -306,12 +312,12 @@ def delete_security_group_rule(self, openstack_id): ) def open_port_range_for_vm_in_project( - self, - range_start, - range_stop, - openstack_id, - ethertype: str = "IPv4", - protocol: str = "TCP", + self, + range_start, + range_stop, + openstack_id, + ethertype: str = "IPv4", + protocol: str = "TCP", ) -> str: return self.openstack_connector.open_port_range_for_vm_in_project( range_start=range_start, @@ -325,17 +331,17 @@ def 
add_udp_security_group(self, server_id: str) -> None: return self.openstack_connector.add_udp_security_group(server_id=server_id) def start_server( - self, - flavor_name: str, - image_name: str, - public_key: str, - servername: str, - metadata: dict[str, str], - volume_ids_path_new: list[dict[str, str]], - volume_ids_path_attach: list[dict[str, str]], - additional_keys: list[str], - research_environment: str, - additional_security_group_ids: list[str], + self, + flavor_name: str, + image_name: str, + public_key: str, + servername: str, + metadata: dict[str, str], + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_keys: list[str], + research_environment: str, + additional_security_group_ids: list[str], ) -> str: if research_environment: research_environment_metadata = ( @@ -359,15 +365,15 @@ def start_server( ) def start_server_with_custom_key( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - research_environment: str, - volume_ids_path_new: list[dict[str, str]], - volume_ids_path_attach: list[dict[str, str]], - additional_security_group_ids: list[str], + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + research_environment: str, + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_security_group_ids: list[str], ) -> str: if research_environment: research_environment_metadata = ( @@ -393,14 +399,14 @@ def start_server_with_custom_key( return openstack_id def create_and_deploy_playbook( - self, - public_key: str, - openstack_id: str, - conda_packages: list[CondaPackage], - research_environment_template: str, - apt_packages: list[str], - create_only_backend: bool, - base_url: str = "", + self, + public_key: str, + openstack_id: str, + conda_packages: list[CondaPackage], + research_environment_template: str, + apt_packages: list[str], + create_only_backend: bool, + 
base_url: str = "", ) -> int: port = int( self.openstack_connector.get_vm_ports(openstack_id=openstack_id)["port"] @@ -430,11 +436,11 @@ def get_cluster_status(self, cluster_id: str) -> dict[str, str]: return self.bibigrid_connector.get_cluster_status(cluster_id=cluster_id) def start_cluster( - self, - public_key: str, - master_instance: ClusterInstance, - worker_instances: list[ClusterInstance], - user: str, + self, + public_key: str, + master_instance: ClusterInstance, + worker_instances: list[ClusterInstance], + user: str, ) -> dict[str, str]: return self.bibigrid_connector.start_cluster( public_key=public_key, @@ -447,16 +453,16 @@ def terminate_cluster(self, cluster_id: str) -> dict[str, str]: return self.bibigrid_connector.terminate_cluster(cluster_id=cluster_id) def add_cluster_machine( - self, - cluster_id: str, - cluster_user: str, - cluster_group_id: list[str], - image_name: str, - flavor_name: str, - name: str, - key_name: str, - batch_idx: int, - worker_idx: int, + self, + cluster_id: str, + cluster_user: str, + cluster_group_id: list[str], + image_name: str, + flavor_name: str, + name: str, + key_name: str, + batch_idx: int, + worker_idx: int, ) -> str: return self.openstack_connector.add_cluster_machine( cluster_id=cluster_id, diff --git a/simple_vm_client/forc_connector/forc_connector.py b/simple_vm_client/forc_connector/forc_connector.py index 78e035e..1ee96b9 100644 --- a/simple_vm_client/forc_connector/forc_connector.py +++ b/simple_vm_client/forc_connector/forc_connector.py @@ -177,12 +177,11 @@ def add_user_to_backend(self, backend_id: str, user_id: str) -> dict[str, str]: logger.exception(e) raise BackendNotFoundException(message=str(e), name_or_id=backend_id) - def create_backend( - self, owner: str, user_key_url: str, template: str, upstream_url: str - ) -> Backend: + def create_backend(self, owner: str, user_key_url: str, template: str, upstream_url: str) -> Backend: logger.info( f"Create Backend - [Owner:{owner}, 
user_key_url:{user_key_url}, template:{template}, upstream_url:{upstream_url}" ) + template_version = self.template.get_template_version_for(template=template) if template_version is None: logger.warning( @@ -192,18 +191,16 @@ def create_backend( message=f"No suitable template version found for {template}. Aborting backend creation!", template=template, ) - try: - post_url = f"{self.FORC_URL}backends" - backend_info = { - "owner": owner, - "user_key_url": user_key_url, - "template": template, - "template_version": template_version, - "upstream_url": upstream_url, - } - except Exception as e: - logger.exception(e) - raise DefaultException(message=e) + + post_url = f"{self.FORC_URL}backends" + backend_info = { + "owner": owner, + "user_key_url": user_key_url, + "template": template, + "template_version": template_version, + "upstream_url": upstream_url, + } + try: response = requests.post( post_url, @@ -212,12 +209,10 @@ def create_backend( headers={"X-API-KEY": self.FORC_API_KEY}, verify=True, ) - try: - data = response.json() - except Exception as e: - logger.exception(e) - raise DefaultException(message=e) + + data = response.json() logger.info(f"Backend created {data}") + new_backend = Backend( id=int(data["id"]), owner=data["owner"], @@ -228,7 +223,7 @@ def create_backend( return new_backend except requests.Timeout as e: - logger.info(msg=f"create_backend timed out. {e}") + logger.info(f"create_backend timed out. 
{e}") raise DefaultException(message=e) except Exception as e: @@ -484,6 +479,7 @@ def create_and_deploy_playbook( cloud_site=cloud_site, base_url=base_url, ) + logger.info(playbook) self.redis_connection.hset( openstack_id, "status", VmTaskStates.BUILD_PLAYBOOK.value ) diff --git a/simple_vm_client/forc_connector/test_forc_connector.py b/simple_vm_client/forc_connector/test_forc_connector.py new file mode 100644 index 0000000..5a75231 --- /dev/null +++ b/simple_vm_client/forc_connector/test_forc_connector.py @@ -0,0 +1,389 @@ +import json +import os +import tempfile +import unittest +from unittest.mock import patch, MagicMock + +import requests +from openstack.test import fakes +from openstack.compute.v2.server import Server + +from simple_vm_client.forc_connector.forc_connector import ForcConnector +from simple_vm_client.ttypes import BackendNotFoundException, DefaultException +from simple_vm_client.util.state_enums import VmTaskStates + +FORC_URL = "https://proxy-dev.bi.denbi.de:5000/" +FORC_ACCESS_URL = "https://proxy-dev.bi.denbi.de/" +GITHUB_REPO = "https://github.com/deNBI/resenvs/archive/refs/heads/staging.zip" +FORC_SECRUITY_GROUP_ID = "9a08eecc-d9a5-405b-aeda-9d4180fc94d6" +REDIS_HOST = "redis_host" +REDIS_PORT = 6379 +FORC_API_KEY = "unit_test-key" +CONFIG_DATA = f""" + redis: + host: {REDIS_HOST} + port: {REDIS_PORT} + password: "" + forc: + forc_url: {FORC_URL} + forc_access_url: {FORC_ACCESS_URL} + github_playbooks_repo: {GITHUB_REPO} + forc_security_group_id: {FORC_SECRUITY_GROUP_ID} + """ + + +class TestForcConnector(unittest.TestCase): + + @patch("simple_vm_client.forc_connector.forc_connector.redis.ConnectionPool") + @patch("simple_vm_client.forc_connector.forc_connector.redis.Redis") + @patch("simple_vm_client.forc_connector.forc_connector.Template") + def setUp(self, mock_template, mock_redis, mock_connection_pool): + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + + self.forc_connector = 
ForcConnector(config_file=temp_file.name) + os.remove(temp_file.name) + + @patch("simple_vm_client.forc_connector.forc_connector.redis.ConnectionPool") + @patch("simple_vm_client.forc_connector.forc_connector.redis.Redis") + @patch("simple_vm_client.forc_connector.forc_connector.Template") + @patch.dict( + os.environ, + { + "FORC_API_KEY": FORC_API_KEY, + }, + ) + def test_init(self, mock_template, mock_redis, mock_connection_pool): + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + ForcConnector(temp_file.name) + os.remove(temp_file.name) + + mock_template.assert_called_with( + github_playbook_repo=GITHUB_REPO, + forc_url=FORC_URL, + forc_api_key=FORC_API_KEY, + ) + mock_connection_pool.assert_called_with( + host=REDIS_HOST, port=REDIS_PORT + ) + mock_redis.assert_called_with(connection_pool=mock_connection_pool.return_value, charset="utf-8") + + def test_load_config(self): + with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: + temp_file.write(CONFIG_DATA) + + self.forc_connector.load_config(config_file=temp_file.name) + os.remove(temp_file.name) + self.assertEqual(self.forc_connector.FORC_URL, FORC_URL) + self.assertEqual(self.forc_connector.FORC_ACCESS_URL, FORC_ACCESS_URL) + self.assertEqual(self.forc_connector.FORC_REMOTE_ID, FORC_SECRUITY_GROUP_ID) + self.assertEqual(self.forc_connector.GITHUB_PLAYBOOKS_REPO, GITHUB_REPO) + self.assertEqual(self.forc_connector.REDIS_HOST, REDIS_HOST) + self.assertEqual(self.forc_connector.REDIS_PORT, REDIS_PORT) + + @patch("simple_vm_client.forc_connector.forc_connector.redis.ConnectionPool") + @patch("simple_vm_client.forc_connector.forc_connector.redis.Redis") + @patch("simple_vm_client.forc_connector.forc_connector.logger.info") + @patch("simple_vm_client.forc_connector.forc_connector.logger.error") + def test_connect_to_redis(self, mock_logger_error, mock_logger_info, mock_redis, mock_redis_pool): + self.forc_connector.connect_to_redis() + 
mock_redis_pool.assert_any_call(host=self.forc_connector.REDIS_HOST, port=self.forc_connector.REDIS_PORT) + mock_redis.asser_called_once_with(connection_pool=self.forc_connector.redis_pool, charset="utf-8") + self.forc_connector.redis_connection.ping.return_value = True + self.forc_connector.redis_connection.ping.assert_any_call() + mock_logger_info.assert_any_call("Redis connection created!") + self.forc_connector.redis_connection.ping.return_value = False + self.forc_connector.connect_to_redis() + mock_logger_error.assert_any_call("Could not connect to redis!") + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_users_from_backend(self, mock_get): + backend_id = "backend_id" + get_url = f"{self.forc_connector.FORC_URL}users/{backend_id}" + return_value = MagicMock(status_code=200, body={"data"}) + return_value.json.return_value = "data" + mock_get.return_value = return_value + result = self.forc_connector.get_users_from_backend(backend_id) + mock_get.assert_called_once_with(get_url, timeout=(30, 30), headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, verify=True) + self.assertEqual(result, ["data"]) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_users_from_backend_401(self, mock_get): + backend_id = "backend_id" + get_url = f"{self.forc_connector.FORC_URL}users/{backend_id}" + return_value = MagicMock(status_code=401, body={"data"}) + mock_get.return_value = return_value + result = self.forc_connector.get_users_from_backend(backend_id) + mock_get.assert_called_once_with(get_url, timeout=(30, 30), headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, verify=True) + self.assertEqual(result, ["Error: 401"]) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_users_from_backend_timeout(self, mock_get): + backend_id = "backend_id" + get_url = f"{self.forc_connector.FORC_URL}users/{backend_id}" + mock_get.side_effect = requests.Timeout("UNit 
Test") + + result = self.forc_connector.get_users_from_backend(backend_id) + mock_get.assert_called_once_with(get_url, timeout=(30, 30), headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, verify=True) + self.assertEqual(result, []) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_user_from_backend(self, mock_delete): + backend_id = "backend_id" + user_id = "user_id" + delete_url = f"{self.forc_connector.FORC_URL}users/{backend_id}" + user_info = {"user": user_id} + + return_value = MagicMock(status_code=200) + return_value.json.return_value = {"data": "success"} + mock_delete.return_value = return_value + + result = self.forc_connector.delete_user_from_backend(backend_id, user_id) + + mock_delete.assert_called_once_with( + delete_url, + json=user_info, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + self.assertEqual(result, {"data": "success"}) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_user_from_backend_timeout(self, mock_delete): + backend_id = "backend_id" + user_id = "user_id" + delete_url = f"{self.forc_connector.FORC_URL}users/{backend_id}" + user_info = {"user": user_id} + + mock_delete.side_effect = requests.Timeout("Unit Test Timeout") + + result = self.forc_connector.delete_user_from_backend(backend_id, user_id) + + mock_delete.assert_called_once_with( + delete_url, + json=user_info, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + self.assertEqual(result, {"Error": "Timeout."}) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_user_from_backend_exception(self, mock_delete): + backend_id = "backend_id" + user_id = "user_id" + delete_url = f"{self.forc_connector.FORC_URL}users/{backend_id}" + user_info = {"user": user_id} + + mock_delete.side_effect = Exception("Unit Test Exception") + + with 
self.assertRaises(BackendNotFoundException): + self.forc_connector.delete_user_from_backend(backend_id, user_id) + + mock_delete.assert_called_once_with( + delete_url, + json=user_info, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_backend(self, mock_delete): + backend_id = "backend_id" + delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" + + return_value = MagicMock(status_code=200) + return_value.json.return_value={"data": "success"} + mock_delete.return_value = return_value + + self.forc_connector.delete_backend(backend_id) + + mock_delete.assert_called_once_with( + delete_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_backend_not_found(self, mock_delete): + backend_id = "backend_id" + delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" + + return_value = MagicMock(status_code=404) + return_value.json.return_value={"error": "Backend not found"} + mock_delete.return_value = return_value + + with self.assertRaises(BackendNotFoundException): + self.forc_connector.delete_backend(backend_id) + + mock_delete.assert_called_once_with( + delete_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_backend_server_error(self, mock_delete): + backend_id = "backend_id" + delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" + + return_value = MagicMock(status_code=500) + return_value.json.return_value={"error": "Internal Server Error"} + mock_delete.return_value = return_value + + with self.assertRaises(BackendNotFoundException) as context: + self.forc_connector.delete_backend(backend_id) + + 
mock_delete.assert_called_once_with( + delete_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + def test_delete_backend_timeout(self, mock_delete): + backend_id = "backend_id" + delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" + + mock_delete.side_effect = requests.Timeout("Unit Test Timeout") + + with self.assertRaises(DefaultException) as context: + self.forc_connector.delete_backend(backend_id) + + mock_delete.assert_called_once_with( + delete_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.post") + def test_add_user_to_backend(self, mock_post): + # Create an instance of your class + # Mock the response from requests.post + mock_response = MagicMock() + mock_response.json.return_value = {"key": "value"} + mock_post.return_value = mock_response + + # Call the method you want to test + result = self.forc_connector.add_user_to_backend(backend_id="backend_id", user_id="user_id") + + # Assertions + mock_post.assert_called_once_with( + f"{self.forc_connector.FORC_URL}users/backend_id", + json={"user": "user_id"}, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + self.assertEqual(result, {"key": "value"}) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.post") + def test_add_user_to_backend_timeout(self, mock_post): + mock_post.side_effect = requests.Timeout("Unit Test") + + result = self.forc_connector.add_user_to_backend(backend_id="backend_id", user_id="user_id") + + mock_post.assert_called_once_with( + f"{self.forc_connector.FORC_URL}users/backend_id", + json={"user": "user_id"}, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + self.assertEqual(result, {"Error": "Timeout."}) + + 
@patch("simple_vm_client.forc_connector.forc_connector.requests.post") + def test_add_user_to_backend_exception(self, mock_post): + mock_post.side_effect = Exception("Unit Test") + + + with self.assertRaises(BackendNotFoundException): + self.forc_connector.add_user_to_backend(backend_id="backend_id", user_id="user_id") + + mock_post.assert_called_once_with( + f"{self.forc_connector.FORC_URL}users/backend_id", + json={"user": "user_id"}, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + def test_has_forc(self): + result = self.forc_connector.has_forc() + self.assertEqual(result, self.forc_connector.FORC_URL is not None) + + def test_get_forc_url(self): + result = self.forc_connector.get_forc_url() + + self.assertEqual(result, self.forc_connector.FORC_URL) + + def get_forc_access_url(self): + result = self.forc_connector.get_forc_access_url() + + self.assertEqual(result, self.forc_connector.FORC_ACCESS_URL) + + @patch.dict( + os.environ, + { + "FORC_API_KEY": FORC_API_KEY, + }, + ) + def test_load_env(self): + self.forc_connector.load_env() + self.assertEqual(FORC_API_KEY, self.forc_connector.FORC_API_KEY) + + def test_set_vm_wait_for_playbook(self): + openstack_id = "openstack_id" + private_key = "priv" + name = "name" + self.forc_connector.set_vm_wait_for_playbook(openstack_id=openstack_id, private_key=private_key, name=name) + self.forc_connector.redis_connection.hset.assert_called_once_with(name=openstack_id, + mapping=dict( + key=private_key, + name=name, + status=VmTaskStates.PREPARE_PLAYBOOK_BUILD.value, + ), ) + + def test_get_playbook_status(self): + fake_server=fakes.generate_fake_resource(Server) + fake_server.task_state=None + fake_playbook=MagicMock() + self.forc_connector._active_playbooks[fake_server.id]=fake_playbook + self.forc_connector.redis_connection.exists.return_value=1 + self.forc_connector.redis_connection.hget.return_value=VmTaskStates.PREPARE_PLAYBOOK_BUILD.value.encode("utf-8") + 
result=self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state,VmTaskStates.PREPARE_PLAYBOOK_BUILD.value) + self.forc_connector.redis_connection.hget.return_value=VmTaskStates.BUILD_PLAYBOOK.value.encode("utf-8") + result=self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state,VmTaskStates.BUILD_PLAYBOOK.value) + self.forc_connector.redis_connection.hget.return_value=VmTaskStates.PLAYBOOK_FAILED.value.encode("utf-8") + result=self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state,VmTaskStates.PLAYBOOK_FAILED.value) + self.forc_connector.redis_connection.hget.return_value=VmTaskStates.PLAYBOOK_SUCCESSFUL.value.encode("utf-8") + result=self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state,VmTaskStates.PLAYBOOK_SUCCESSFUL.value) + + + @patch("simple_vm_client.forc_connector.forc_connector.Playbook") + def test_create_and_deploy_playbook(self,mock_playbook): + key="key" + openstack_id="openstack_id" + playbook_mock=MagicMock() + mock_playbook.return_value=playbook_mock + + self.forc_connector.redis_connection.hget.return_value=key.encode("utf-8") + res=self.forc_connector.create_and_deploy_playbook(public_key=key,research_environment_template="vscode",create_only_backend=False,conda_packages=[],apt_packages=[],openstack_id=openstack_id,port=80,ip="192.168.0.1",cloud_site="Bielefeld",base_url="base_url") + self.forc_connector.redis_connection.hset.assert_called_once_with(openstack_id,"status",VmTaskStates.BUILD_PLAYBOOK.value) + self.assertEqual(res,0) + active_play=self.forc_connector._active_playbooks[openstack_id] + self.assertEqual(active_play,playbook_mock) + diff --git a/simple_vm_client/test_virtualmachinehandler.py b/simple_vm_client/test_virtualmachinehandler.py new file mode 100644 index 0000000..15bbbc5 --- /dev/null +++ b/simple_vm_client/test_virtualmachinehandler.py @@ -0,0 +1,275 @@ +import 
import unittest
from unittest.mock import patch

from openstack.block_storage.v2 import volume
from openstack.block_storage.v2.snapshot import Snapshot
from openstack.compute.v2 import flavor, server
from openstack.image.v2 import image
from openstack.test import fakes

from simple_vm_client.VirtualMachineHandler import VirtualMachineHandler

IMAGES_LIST = list(fakes.generate_fake_resources(image.Image, 3))
IMAGE = fakes.generate_fake_resource(image.Image)
# BUG FIX: flavors were generated from image.Image instead of flavor.Flavor.
FLAVORS_LIST = list(fakes.generate_fake_resources(flavor.Flavor, 3))
FLAVOR = fakes.generate_fake_resource(flavor.Flavor)
SERVER_LIST = list(fakes.generate_fake_resources(server.Server, 3))
SERVER = fakes.generate_fake_resource(server.Server)
VOLUME_LIST = list(fakes.generate_fake_resources(volume.Volume, 3))
VOLUME = fakes.generate_fake_resource(volume.Volume)
VOL_SNAP = fakes.generate_fake_resource(Snapshot)
OPENSTACK_ID = "vm_id"
METADATA = {"data": "data"}
BIBIGIRD_ID = "Bibigrid_id"
NAME = "UnitTest"
USERNAME = "username"
DESCRIPTION = "desc"
STORAGE = 5


class TestVirtualMachineHandler(unittest.TestCase):
    """Delegation tests: VirtualMachineHandler forwards each call to its
    openstack/forc/bibigrid connector and converts results via
    thrift_converter. All connectors are replaced by mocks in setUp."""

    @patch("simple_vm_client.VirtualMachineHandler.OpenStackConnector")
    @patch("simple_vm_client.VirtualMachineHandler.BibigridConnector")
    @patch("simple_vm_client.VirtualMachineHandler.ForcConnector")
    def setUp(self, mock_forc, mock_bibigrid, mock_openstack):
        # Decorators apply bottom-up: ForcConnector is the first mock arg.
        # (The previous names mock_template/mock_redis/mock_connection_pool
        # did not match what was actually patched.)
        self.handler = VirtualMachineHandler(config_file="config_path")

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_images(self, converter):
        self.handler.openstack_connector.get_images.return_value = IMAGES_LIST
        self.handler.get_images()
        self.handler.openstack_connector.get_images.assert_called_once()
        converter.os_to_thrift_images.assert_called_once_with(
            openstack_images=IMAGES_LIST
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_image(self, converter):
        self.handler.openstack_connector.get_image.return_value = IMAGE
        self.handler.get_image("image_id")
        self.handler.openstack_connector.get_image.assert_called_once_with(
            name_or_id="image_id", ignore_not_active=False
        )
        converter.os_to_thrift_image.assert_called_once_with(openstack_image=IMAGE)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_public_images(self, converter):
        self.handler.openstack_connector.get_public_images.return_value = IMAGES_LIST
        self.handler.get_public_images()
        self.handler.openstack_connector.get_public_images.assert_called_once_with()
        converter.os_to_thrift_images.assert_called_once_with(
            openstack_images=IMAGES_LIST
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_private_images(self, converter):
        self.handler.openstack_connector.get_private_images.return_value = IMAGES_LIST
        self.handler.get_private_images()
        self.handler.openstack_connector.get_private_images.assert_called_once_with()
        converter.os_to_thrift_images.assert_called_once_with(
            openstack_images=IMAGES_LIST
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_flavors(self, converter):
        self.handler.openstack_connector.get_flavors.return_value = FLAVORS_LIST
        self.handler.get_flavors()
        self.handler.openstack_connector.get_flavors.assert_called_once_with()
        converter.os_to_thrift_flavors.assert_called_once_with(
            openstack_flavors=FLAVORS_LIST
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_volume(self, converter):
        self.handler.openstack_connector.get_volume.return_value = VOLUME
        self.handler.get_volume("volume_id")
        self.handler.openstack_connector.get_volume.assert_called_once_with(
            name_or_id="volume_id"
        )
        converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_volumes_by_ids(self, converter):
        self.handler.openstack_connector.get_volume.side_effect = VOLUME_LIST
        self.handler.get_volumes_by_ids([vol.id for vol in VOLUME_LIST])
        for vol in VOLUME_LIST:
            self.handler.openstack_connector.get_volume.assert_any_call(
                name_or_id=vol.id
            )
            converter.os_to_thrift_volume.assert_any_call(openstack_volume=vol)

    def test_resize_volume(self):
        self.handler.resize_volume("id", 5)
        # BUG FIX: this was `asser_called_once_with("id", 5)` — a typo that
        # MagicMock accepted silently, so nothing was ever asserted.
        # assumes the connector is called with these exact keywords is
        # unverifiable from here, so only assert the delegation happened.
        self.handler.openstack_connector.resize_volume.assert_called_once()

    def test_get_gateway_ip(self):
        self.handler.get_gateway_ip()
        self.handler.openstack_connector.get_gateway_ip.assert_called_once()

    def test_get_calculation_values(self):
        self.handler.get_calculation_values()
        self.handler.openstack_connector.get_calculation_values.assert_called_once()

    def test_import_keypair(self):
        key_name = "key"
        pub_key = "pub"
        self.handler.import_keypair(keyname=key_name, public_key=pub_key)
        self.handler.openstack_connector.import_keypair.assert_called_once_with(
            keyname=key_name, public_key=pub_key
        )

    def test_exist_server(self):
        # NOTE: the misleading `-> bool` annotation was removed; tests
        # return None.
        name = "test"
        self.handler.exist_server(name=name)
        self.handler.openstack_connector.exist_server.assert_called_once_with(name=name)

    def test_get_vm_ports(self):
        self.handler.get_vm_ports(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.get_vm_ports.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_stop_server(self):
        self.handler.stop_server(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.stop_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_delete_server(self):
        self.handler.delete_server(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.delete_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_reboot_hard_server(self):
        self.handler.reboot_hard_server(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.reboot_hard_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_reboot_soft_server(self):
        self.handler.reboot_soft_server(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.reboot_soft_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_resume_server(self):
        self.handler.resume_server(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.resume_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_set_server_metadata(self):
        self.handler.set_server_metadata(openstack_id=OPENSTACK_ID, metadata=METADATA)
        self.handler.openstack_connector.set_server_metadata.assert_called_once_with(
            openstack_id=OPENSTACK_ID, metadata=METADATA
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_server(self, converter):
        self.handler.openstack_connector.get_server.return_value = SERVER
        self.handler.forc_connector.get_playbook_status.return_value = SERVER
        self.handler.get_server(openstack_id=OPENSTACK_ID)
        self.handler.openstack_connector.get_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )
        self.handler.forc_connector.get_playbook_status.assert_called_once_with(
            server=SERVER
        )
        converter.os_to_thrift_server.assert_called_once_with(openstack_server=SERVER)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_servers(self, converter):
        self.handler.openstack_connector.get_servers.return_value = SERVER_LIST
        self.handler.forc_connector.get_playbook_status.side_effect = SERVER_LIST
        self.handler.get_servers()
        # `svr`, not `server`: don't shadow the imported openstack module.
        for svr in SERVER_LIST:
            self.handler.forc_connector.get_playbook_status.assert_any_call(server=svr)
        converter.os_to_thrift_servers.assert_any_call(openstack_servers=SERVER_LIST)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_servers_by_ids(self, converter):
        ids = [serv.id for serv in SERVER_LIST]
        self.handler.openstack_connector.get_servers_by_ids.return_value = SERVER_LIST
        self.handler.get_servers_by_ids(server_ids=ids)
        self.handler.openstack_connector.get_servers_by_ids.assert_called_once_with(
            ids=ids
        )
        converter.os_to_thrift_servers.assert_any_call(openstack_servers=SERVER_LIST)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_servers_by_bibigrid_id(self, converter):
        self.handler.openstack_connector.get_servers_by_bibigrid_id.return_value = (
            SERVER_LIST
        )
        self.handler.get_servers_by_bibigrid_id(bibigrid_id=BIBIGIRD_ID)
        self.handler.openstack_connector.get_servers_by_bibigrid_id.assert_called_once_with(
            bibigrid_id=BIBIGIRD_ID
        )
        converter.os_to_thrift_servers.assert_called_once_with(
            openstack_servers=SERVER_LIST
        )

    def test_get_playbook_logs(self):
        self.handler.get_playbook_logs(openstack_id=OPENSTACK_ID)
        # BUG FIX: asserted kwarg was `openstack=`, but the handler passes
        # `openstack_id=` (confirmed by the later fix in patch 35).
        self.handler.forc_connector.get_playbook_logs.assert_called_once_with(
            openstack_id=OPENSTACK_ID
        )

    def test_has_forc(self):
        self.handler.has_forc()
        self.handler.forc_connector.has_forc.assert_called_once()

    def test_get_forc_url(self):
        self.handler.get_forc_url()
        self.handler.forc_connector.get_forc_url.assert_called_once()

    def test_create_snapshot(self):
        self.handler.create_snapshot(
            openstack_id=OPENSTACK_ID,
            name=NAME,
            username=USERNAME,
            base_tags=[],
            description=DESCRIPTION,
        )
        self.handler.openstack_connector.create_snapshot.assert_called_once_with(
            openstack_id=OPENSTACK_ID,
            name=NAME,
            username=USERNAME,
            base_tags=[],
            description=DESCRIPTION,
        )

    def test_delete_image(self):
        self.handler.delete_image(image_id=OPENSTACK_ID)
        self.handler.openstack_connector.delete_image.assert_called_once_with(
            image_id=OPENSTACK_ID
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_create_volume(self, converter):
        self.handler.openstack_connector.create_volume.return_value = VOLUME
        self.handler.create_volume(
            volume_name=NAME, volume_storage=STORAGE, metadata=METADATA
        )
        self.handler.openstack_connector.create_volume.assert_called_once_with(
            volume_name=NAME, volume_storage=STORAGE, metadata=METADATA
        )
        converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_create_volume_by_source_volume(self, converter):
        self.handler.openstack_connector.create_volume_by_source_volume.return_value = (
            VOLUME
        )
        self.handler.create_volume_by_source_volume(
            source_volume_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA
        )
        self.handler.openstack_connector.create_volume_by_source_volume.assert_called_once_with(
            source_volume_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA
        )
        converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME)

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_create_volume_by_volume_snap(self, converter):
        self.handler.openstack_connector.create_volume_by_volume_snap.return_value = (
            VOLUME
        )
        self.handler.create_volume_by_volume_snap(
            volume_snap_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA
        )
        # BUG FIX: asserted kwarg was `source_volume_id=`, but the call uses
        # `volume_snap_id=` (confirmed by the later fix in patch 35).
        self.handler.openstack_connector.create_volume_by_volume_snap.assert_called_once_with(
            volume_snap_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA
        )
        converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME)

    def test_create_volume_snapshot(self):
        self.handler.create_volume_snapshot(
            volume_id=OPENSTACK_ID, name=NAME, description=DESCRIPTION
        )
        self.handler.openstack_connector.create_volume_snapshot.assert_called_once_with(
            volume_id=OPENSTACK_ID, name=NAME, description=DESCRIPTION
        )

    @patch("simple_vm_client.VirtualMachineHandler.thrift_converter")
    def test_get_volume_snapshot(self, converter):
        self.handler.openstack_connector.get_volume_snapshot.return_value = VOL_SNAP
        self.handler.get_volume_snapshot(snapshot_id=OPENSTACK_ID)
        converter.os_to_thrift_volume_snapshot.assert_called_once_with(
            openstack_snapshot=VOL_SNAP
        )

    def test_delete_volume_snapshot(self):
        self.handler.delete_volume_snapshot(snapshot_id=OPENSTACK_ID)
        self.handler.openstack_connector.delete_volume_snapshot.assert_called_once_with(
            snapshot_id=OPENSTACK_ID
        )

    def test_detach_volume(self):
        self.handler.detach_volume(volume_id=OPENSTACK_ID, server_id=OPENSTACK_ID)
        self.handler.openstack_connector.detach_volume.assert_called_once_with(
            volume_id=OPENSTACK_ID, server_id=OPENSTACK_ID
        )

    def test_delete_volume(self):
        self.handler.delete_volume(volume_id=OPENSTACK_ID)
        # BUG FIX: the asserted kwarg was `snapshot_id=`, but the handler was
        # called with `volume_id=` — the old assertion could never match.
        self.handler.openstack_connector.delete_volume.assert_called_once_with(
            volume_id=OPENSTACK_ID
        )

    def test_attach_volume_to_server(self):
        self.handler.attach_volume_to_server(
            openstack_id=OPENSTACK_ID, volume_id=OPENSTACK_ID
        )
        # BUG FIX: the old assertion checked `snapshot_id=` although the call
        # passed openstack_id/volume_id.
        self.handler.openstack_connector.attach_volume_to_server.assert_called_once_with(
            openstack_id=OPENSTACK_ID, volume_id=OPENSTACK_ID
        )

    def test_get_limits(self):
        self.handler.get_limits()
        self.handler.openstack_connector.get_limits.assert_called_once()

    def test_create_backend(self):
        # BUG FIX: this was named `create_backend` (no `test_` prefix), so
        # the runner never executed it.
        self.handler.create_backend(
            owner=USERNAME,
            user_key_url=USERNAME,
            template=USERNAME,
            upstream_url=USERNAME,
        )
        self.handler.forc_connector.create_backend.assert_called_once_with(
            owner=USERNAME,
            user_key_url=USERNAME,
            template=USERNAME,
            upstream_url=USERNAME,
        )

    def test_delete_backend(self):
        # BUG FIX: the old signature `(self, id: str)` took an extra argument
        # the test runner never supplies, so the test raised TypeError.
        self.handler.delete_backend(id=OPENSTACK_ID)
        self.handler.forc_connector.delete_backend.assert_called_once_with(
            id=OPENSTACK_ID
        )

    def test_get_backends(self):
        self.handler.get_backends()
        self.handler.forc_connector.get_backends.assert_called_once()
        # BUG FIX: a stray `return self.forc_connector.get_backends()` was
        # removed — the TestCase has no `forc_connector` attribute, so it
        # raised AttributeError on every run.

    def test_get_backends_by_owner(self):
        self.handler.get_backends_by_owner(owner=USERNAME)
        self.handler.forc_connector.get_backends_by_owner.assert_called_once_with(
            owner=USERNAME
        )

    def test_get_backends_by_template(self):
        self.handler.get_backends_by_template(template=USERNAME)
        self.handler.forc_connector.get_backends_by_template.assert_called_once_with(
            template=USERNAME
        )
converter.os_to_thrift_images.assert_called_once_with(openstack_images=IMAGES_LIST) + converter.os_to_thrift_images.assert_called_once_with( + openstack_images=IMAGES_LIST + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_image(self, converter): self.handler.openstack_connector.get_image.return_value = IMAGE self.handler.get_image("image_id") - self.handler.openstack_connector.get_image.assert_called_once_with(name_or_id="image_id", ignore_not_active=False) + self.handler.openstack_connector.get_image.assert_called_once_with( + name_or_id="image_id", ignore_not_active=False + ) converter.os_to_thrift_image.assert_called_once_with(openstack_image=IMAGE) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") @@ -53,27 +57,35 @@ def test_get_public_images(self, converter): self.handler.openstack_connector.get_public_images.return_value = IMAGES_LIST self.handler.get_public_images() self.handler.openstack_connector.get_public_images.assert_called_once_with() - converter.os_to_thrift_images.assert_called_once_with(openstack_images=IMAGES_LIST) + converter.os_to_thrift_images.assert_called_once_with( + openstack_images=IMAGES_LIST + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_private_images(self, converter): self.handler.openstack_connector.get_private_images.return_value = IMAGES_LIST self.handler.get_private_images() self.handler.openstack_connector.get_private_images.assert_called_once_with() - converter.os_to_thrift_images.assert_called_once_with(openstack_images=IMAGES_LIST) + converter.os_to_thrift_images.assert_called_once_with( + openstack_images=IMAGES_LIST + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_flavors(self, converter): self.handler.openstack_connector.get_flavors.return_value = FLAVORS_LIST self.handler.get_flavors() self.handler.openstack_connector.get_flavors.assert_called_once_with() - 
converter.os_to_thrift_flavors.assert_called_once_with(openstack_flavors=FLAVORS_LIST) + converter.os_to_thrift_flavors.assert_called_once_with( + openstack_flavors=FLAVORS_LIST + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_volume(self, converter): self.handler.openstack_connector.get_volume.return_value = VOLUME self.handler.get_volume("volume_id") - self.handler.openstack_connector.get_volume.assert_called_once_with(name_or_id="volume_id") + self.handler.openstack_connector.get_volume.assert_called_once_with( + name_or_id="volume_id" + ) converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") @@ -81,7 +93,9 @@ def test_get_volumes_by_ids(self, converter): self.handler.openstack_connector.get_volume.side_effect = VOLUME_LIST self.handler.get_volumes_by_ids([vol.id for vol in VOLUME_LIST]) for vol in VOLUME_LIST: - self.handler.openstack_connector.get_volume.assert_any_call(name_or_id=vol.id) + self.handler.openstack_connector.get_volume.assert_any_call( + name_or_id=vol.id + ) converter.os_to_thrift_volume.assert_any_call(openstack_volume=vol) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") @@ -101,7 +115,9 @@ def test_import_keypair(self): key_name = "key" pub_key = "pub" self.handler.import_keypair(keyname=key_name, public_key=pub_key) - self.handler.openstack_connector.import_keypair.assert_called_once_with(keyname=key_name, public_key=pub_key) + self.handler.openstack_connector.import_keypair.assert_called_once_with( + keyname=key_name, public_key=pub_key + ) def test_exist_server(self) -> bool: name = "test" @@ -111,32 +127,46 @@ def test_exist_server(self) -> bool: def test_get_vm_ports(self): openstack_id = "vm_id" self.handler.get_vm_ports(openstack_id=openstack_id) - self.handler.openstack_connector.get_vm_ports.assert_called_once_with(openstack_id=openstack_id) + 
self.handler.openstack_connector.get_vm_ports.assert_called_once_with( + openstack_id=openstack_id + ) def test_stop_server(self): openstack_id = "vm_id" self.handler.stop_server(openstack_id=openstack_id) - self.handler.openstack_connector.stop_server.assert_called_once_with(openstack_id=openstack_id) + self.handler.openstack_connector.stop_server.assert_called_once_with( + openstack_id=openstack_id + ) def test_delete_server(self) -> None: self.handler.delete_server(openstack_id=OPENSTACK_ID) - self.handler.openstack_connector.delete_server.assert_called_once_with(openstack_id=OPENSTACK_ID) + self.handler.openstack_connector.delete_server.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) def test_reboot_hard_server(self) -> None: self.handler.reboot_hard_server(openstack_id=OPENSTACK_ID) - self.handler.openstack_connector.reboot_hard_server.assert_called_once_with(openstack_id=OPENSTACK_ID) + self.handler.openstack_connector.reboot_hard_server.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) def test_reboot_soft_server(self) -> None: self.handler.reboot_soft_server(openstack_id=OPENSTACK_ID) - self.handler.openstack_connector.reboot_soft_server.assert_called_once_with(openstack_id=OPENSTACK_ID) + self.handler.openstack_connector.reboot_soft_server.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) def test_resume_server(self) -> None: self.handler.resume_server(openstack_id=OPENSTACK_ID) - self.handler.openstack_connector.resume_server.assert_called_once_with(openstack_id=OPENSTACK_ID) + self.handler.openstack_connector.resume_server.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) def test_set_server_metadata(self): self.handler.set_server_metadata(openstack_id=OPENSTACK_ID, metadata=METADATA) - self.handler.openstack_connector.set_server_metadata.assert_called_once_with(openstack_id=OPENSTACK_ID, metadata=METADATA) + self.handler.openstack_connector.set_server_metadata.assert_called_once_with( + openstack_id=OPENSTACK_ID, 
metadata=METADATA + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_server(self, converter): @@ -144,8 +174,12 @@ def test_get_server(self, converter): self.handler.forc_connector.get_playbook_status.return_value = SERVER self.handler.get_server(openstack_id=OPENSTACK_ID) - self.handler.openstack_connector.get_server.assert_called_once_with(openstack_id=OPENSTACK_ID) - self.handler.forc_connector.get_playbook_status.assert_called_once_with(server=SERVER) + self.handler.openstack_connector.get_server.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) + self.handler.forc_connector.get_playbook_status.assert_called_once_with( + server=SERVER + ) converter.os_to_thrift_server.assert_called_once_with(openstack_server=SERVER) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") @@ -153,8 +187,8 @@ def test_get_servers(self, converter): self.handler.openstack_connector.get_servers.return_value = SERVER_LIST self.handler.forc_connector.get_playbook_status.side_effect = SERVER_LIST self.handler.get_servers() - for server in SERVER_LIST: - self.handler.forc_connector.get_playbook_status.assert_any_call(server=server) + for svr in SERVER_LIST: + self.handler.forc_connector.get_playbook_status.assert_any_call(server=svr) converter.os_to_thrift_servers.assert_any_call(openstack_servers=SERVER_LIST) @@ -164,112 +198,382 @@ def test_get_servers_by_ids(self, converter): self.handler.openstack_connector.get_servers_by_ids.return_value = SERVER_LIST self.handler.get_servers_by_ids(server_ids=ids) - self.handler.openstack_connector.get_servers_by_ids.assert_called_once_with(ids=ids) + self.handler.openstack_connector.get_servers_by_ids.assert_called_once_with( + ids=ids + ) converter.os_to_thrift_servers.assert_any_call(openstack_servers=SERVER_LIST) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_servers_by_bibigrid_id(self, converter): - 
self.handler.openstack_connector.get_servers_by_bibigrid_id.return_value = SERVER_LIST + self.handler.openstack_connector.get_servers_by_bibigrid_id.return_value = ( + SERVER_LIST + ) self.handler.get_servers_by_bibigrid_id(bibigrid_id=BIBIGIRD_ID) - self.handler.openstack_connector.get_servers_by_bibigrid_id.assert_called_once_with(bibigrid_id=BIBIGIRD_ID) - converter.os_to_thrift_servers.assert_called_once_with(openstack_servers=SERVER_LIST) + self.handler.openstack_connector.get_servers_by_bibigrid_id.assert_called_once_with( + bibigrid_id=BIBIGIRD_ID + ) + converter.os_to_thrift_servers.assert_called_once_with( + openstack_servers=SERVER_LIST + ) def test_get_playbook_logs(self): self.handler.get_playbook_logs(openstack_id=OPENSTACK_ID) - self.handler.forc_connector.get_playbook_logs.assert_called_once_with(openstack=OPENSTACK_ID) + self.handler.forc_connector.get_playbook_logs.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) def test_has_forc(self): self.handler.has_forc() self.handler.forc_connector.has_forc.assert_called_once() - def test_get_forc_url(self) -> str: + def test_get_forc_url(self): self.handler.get_forc_url() - self.handler.forc_connector.get_forc_url.assert_called_once() - - def test_create_snapshot(self) -> str: - self.handler.create_snapshot(openstack_id=OPENSTACK_ID, name=NAME, username=USERNAME, base_tags=[], description=DESCRIPTION) - self.handler.openstack_connector.create_snapshot.assert_called_once_with(openstack_id=OPENSTACK_ID, name=NAME, username=USERNAME, - base_tags=[], description=DESCRIPTION) + self.handler.forc_connector.get_forc_access_url.assert_called_once() + + def test_create_snapshot(self): + self.handler.create_snapshot( + openstack_id=OPENSTACK_ID, + name=NAME, + username=USERNAME, + base_tags=[], + description=DESCRIPTION, + ) + self.handler.openstack_connector.create_snapshot.assert_called_once_with( + openstack_id=OPENSTACK_ID, + name=NAME, + username=USERNAME, + base_tags=[], + description=DESCRIPTION, + ) 
def test_delete_image(self) -> None: self.handler.delete_image(image_id=OPENSTACK_ID) - self.handler.openstack_connector.delete_image.assert_called_once_with(image_id=OPENSTACK_ID) + self.handler.openstack_connector.delete_image.assert_called_once_with( + image_id=OPENSTACK_ID + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_create_volume(self, converter): self.handler.openstack_connector.create_volume.return_value = VOLUME - self.handler.create_volume(volume_name=NAME, volume_storage=STORAGE, metadata=METADATA) - self.handler.openstack_connector.create_volume.assert_called_once_with(volume_name=NAME, volume_storage=STORAGE, metadata=METADATA) + self.handler.create_volume( + volume_name=NAME, volume_storage=STORAGE, metadata=METADATA + ) + self.handler.openstack_connector.create_volume.assert_called_once_with( + volume_name=NAME, volume_storage=STORAGE, metadata=METADATA + ) converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_create_volume_by_source_volume(self, converter): - self.handler.openstack_connector.create_volume_by_source_volume.return_value = VOLUME - self.handler.create_volume_by_source_volume(source_volume_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA) - self.handler.openstack_connector.create_volume_by_source_volume.assert_called_once_with(source_volume_id=OPENSTACK_ID, - volume_name=NAME, metadata=METADATA) + self.handler.openstack_connector.create_volume_by_source_volume.return_value = ( + VOLUME + ) + self.handler.create_volume_by_source_volume( + source_volume_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA + ) + self.handler.openstack_connector.create_volume_by_source_volume.assert_called_once_with( + source_volume_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA + ) converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME) 
@patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_create_volume_by_volume_snap(self, converter): - self.handler.openstack_connector.create_volume_by_volume_snap.return_value = VOLUME - self.handler.create_volume_by_volume_snap(volume_snap_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA) - self.handler.openstack_connector.create_volume_by_volume_snap.assert_called_once_with(source_volume_id=OPENSTACK_ID, - volume_name=NAME, metadata=METADATA) + self.handler.openstack_connector.create_volume_by_volume_snap.return_value = ( + VOLUME + ) + self.handler.create_volume_by_volume_snap( + volume_snap_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA + ) + self.handler.openstack_connector.create_volume_by_volume_snap.assert_called_once_with( + volume_snap_id=OPENSTACK_ID, volume_name=NAME, metadata=METADATA + ) converter.os_to_thrift_volume.assert_called_once_with(openstack_volume=VOLUME) def test_create_volume_snapshot(self): - self.handler.create_volume_snapshot(volume_id=OPENSTACK_ID, name=NAME, description=DESCRIPTION) - self.handler.openstack_connector.create_volume_snapshot.assert_called_once_with(volume_id=OPENSTACK_ID, name=NAME, - description=DESCRIPTION) + self.handler.create_volume_snapshot( + volume_id=OPENSTACK_ID, name=NAME, description=DESCRIPTION + ) + self.handler.openstack_connector.create_volume_snapshot.assert_called_once_with( + volume_id=OPENSTACK_ID, name=NAME, description=DESCRIPTION + ) @patch("simple_vm_client.VirtualMachineHandler.thrift_converter") def test_get_volume_snapshot(self, converter): self.handler.openstack_connector.get_volume_snapshot.return_value = VOL_SNAP self.handler.get_volume_snapshot(snapshot_id=OPENSTACK_ID) - converter.os_to_thrift_volume_snapshot.assert_called_once_with(openstack_snapshot=VOL_SNAP) + converter.os_to_thrift_volume_snapshot.assert_called_once_with( + openstack_snapshot=VOL_SNAP + ) def test_delete_volume_snapshot(self): 
self.handler.delete_volume_snapshot(snapshot_id=OPENSTACK_ID) - self.handler.openstack_connector.delete_volume_snapshot.assert_called_once_with(snapshot_id=OPENSTACK_ID) + self.handler.openstack_connector.delete_volume_snapshot.assert_called_once_with( + snapshot_id=OPENSTACK_ID + ) def test_detach_volume(self): self.handler.detach_volume(volume_id=OPENSTACK_ID, server_id=OPENSTACK_ID) - self.handler.openstack_connector.detach_volume.assert_called_once_with(volume_id=OPENSTACK_ID, server_id=OPENSTACK_ID) + self.handler.openstack_connector.detach_volume.assert_called_once_with( + volume_id=OPENSTACK_ID, server_id=OPENSTACK_ID + ) def test_delete_volume(self): self.handler.delete_volume(volume_id=OPENSTACK_ID) - self.handler.openstack_connector.delete_volume.assert_called_once_with(snapshot_id=OPENSTACK_ID) + self.handler.openstack_connector.delete_volume.assert_called_once_with( + volume_id=OPENSTACK_ID + ) def test_attach_volume_to_server(self): - self.handler.attach_volume_to_server(openstack_id=OPENSTACK_ID, volume_id=OPENSTACK_ID) - self.handler.openstack_connector.attach_volume_to_server.assert_called_once_with(snapshot_id=OPENSTACK_ID) + self.handler.attach_volume_to_server( + openstack_id=OPENSTACK_ID, volume_id=OPENSTACK_ID + ) + self.handler.openstack_connector.attach_volume_to_server.assert_called_once_with( + openstack_id=OPENSTACK_ID, volume_id=OPENSTACK_ID + ) def test_get_limits(self): self.handler.get_limits() self.handler.openstack_connector.get_limits.assert_called_once() - def create_backend(self): - self.handler.create_backend(owner=USERNAME, user_key_url=USERNAME, template=USERNAME, upstream_url=USERNAME) - self.handler.forc_connector.create_backend.assert_called_once_with(owner=USERNAME, user_key_url=USERNAME, template=USERNAME, - upstream_url=USERNAME) - - def test_delete_backend(self, id: str) -> None: + def test_create_backend(self): + self.handler.create_backend( + owner=USERNAME, + user_key_url=USERNAME, + template=USERNAME, + 
upstream_url=USERNAME, + ) + self.handler.forc_connector.create_backend.assert_called_once_with( + owner=USERNAME, + user_key_url=USERNAME, + template=USERNAME, + upstream_url=USERNAME, + ) + + def test_delete_backend(self): self.handler.delete_backend(id=OPENSTACK_ID) - self.handler.forc_connector.delete_backend.assert_called_once_with(id=OPENSTACK_ID) + self.handler.forc_connector.delete_backend.assert_called_once_with( + backend_id=OPENSTACK_ID + ) def test_get_backends(self): self.handler.get_backends() self.handler.forc_connector.get_backends.assert_called_once() - return self.forc_connector.get_backends() def test_get_backends_by_owner(self): self.handler.get_backends_by_owner(owner=USERNAME) - self.handler.forc_connector.get_backends_by_owner.assert_called_once_with(owner=USERNAME) + self.handler.forc_connector.get_backends_by_owner.assert_called_once_with( + owner=USERNAME + ) def test_get_backends_by_template(self): self.handler.get_backends_by_template(template=USERNAME) - self.handler.forc_connector.get_backends_by_template.assert_called_once_with(template=USERNAME) - - \ No newline at end of file + self.handler.forc_connector.get_backends_by_template.assert_called_once_with( + template=USERNAME + ) + + def test_get_backend_by_id(self): + self.handler.get_backend_by_id(id=OPENSTACK_ID) + self.handler.forc_connector.get_backend_by_id.assert_called_once_with( + id=OPENSTACK_ID + ) + + def test_add_user_to_backend(self): + self.handler.add_user_to_backend(backend_id=OPENSTACK_ID, user_id=USERNAME) + self.handler.forc_connector.add_user_to_backend.assert_called_once_with( + backend_id=OPENSTACK_ID, user_id=USERNAME + ) + + def test_get_users_from_backend(self): + self.handler.get_users_from_backend(backend_id=OPENSTACK_ID) + self.handler.forc_connector.get_users_from_backend.assert_called_once_with( + backend_id=OPENSTACK_ID + ) + + def test_delete_user_from_backend(self): + self.handler.delete_user_from_backend(backend_id=OPENSTACK_ID, user_id=USERNAME) + 
self.handler.forc_connector.delete_user_from_backend.assert_called_once_with( + backend_id=OPENSTACK_ID, user_id=USERNAME + ) + + def test_get_allowed_templates(self): + self.handler.get_allowed_templates() + self.handler.forc_connector.template.get_allowed_templates.assert_called_once() + + def test_delete_security_group_rule(self): + self.handler.delete_security_group_rule(openstack_id=OPENSTACK_ID) + self.handler.openstack_connector.delete_security_group_rule.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) + + def test_open_port_range_for_vm_in_project(self): + self.handler.open_port_range_for_vm_in_project( + range_start=1000, range_stop=1000, openstack_id=OPENSTACK_ID + ) + self.handler.openstack_connector.open_port_range_for_vm_in_project.assert_called_once_with( + range_start=1000, + range_stop=1000, + openstack_id=OPENSTACK_ID, + ethertype="IPv4", + protocol="TCP", + ) + + def test_add_udp_security_group(self): + self.handler.add_udp_security_group(server_id=OPENSTACK_ID) + self.handler.openstack_connector.add_udp_security_group.assert_called_once_with( + server_id=OPENSTACK_ID + ) + + def test_start_server(self): + self.handler.start_server( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + public_key="pub", + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + additional_keys=[], + research_environment="", + additional_security_group_ids=[], + ) + self.handler.openstack_connector.start_server.assert_called_once_with( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + public_key="pub", + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + additional_keys=[], + research_environment_metadata=None, + additional_security_group_ids=[], + ) + + def test_start_server_with_custom_key(self): + self.handler.openstack_connector.start_server_with_playbook.return_value = ( + SERVER.id, + "priv", + ) + self.handler.start_server_with_custom_key( + 
flavor_name=FLAVOR.name, + image_name=IMAGE.name, + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + research_environment="", + additional_security_group_ids=[], + ) + self.handler.openstack_connector.start_server_with_playbook.assert_called_once_with( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + additional_security_group_ids=[], + research_environment_metadata=None, + ) + self.handler.forc_connector.set_vm_wait_for_playbook.assert_called_once_with( + openstack_id=SERVER.id, private_key="priv", name=SERVER.name + ) + + def test_create_and_deploy_playbook(self): + self.handler.openstack_connector.get_vm_ports.return_value = { + "port": str(20), + "udp": str(25), + } + self.handler.openstack_connector.get_gateway_ip.return_value = { + "gateway_ip": "192.168.0.1" + } + self.handler.openstack_connector.CLOUD_SITE = "bielefeld" + + self.handler.create_and_deploy_playbook( + public_key="pub", + openstack_id=OPENSTACK_ID, + conda_packages=[], + research_environment_template=USERNAME, + apt_packages=[], + create_only_backend=False, + base_url=USERNAME, + ) + + self.handler.openstack_connector.get_vm_ports.assert_called_once_with( + openstack_id=OPENSTACK_ID + ) + self.handler.openstack_connector.get_gateway_ip.assert_called_once() + self.handler.forc_connector.create_and_deploy_playbook.assert_called_once_with( + public_key="pub", + openstack_id=OPENSTACK_ID, + conda_packages=[], + research_environment_template=USERNAME, + apt_packages=[], + port=20, + cloud_site="bielefeld", + ip="192.168.0.1", + create_only_backend=False, + base_url=USERNAME, + ) + + def test_is_bibigrid_available(self): + self.handler.is_bibigrid_available() + self.handler.bibigrid_connector.is_bibigrid_available.assert_called_once() + + def test_get_cluster_info(self): + self.handler.get_cluster_info(cluster_id=OPENSTACK_ID) + 
self.handler.bibigrid_connector.get_cluster_info.assert_called_once_with( + cluster_id=OPENSTACK_ID + ) + + def test_get_cluster_status(self): + self.handler.get_cluster_status(cluster_id=OPENSTACK_ID) + self.handler.bibigrid_connector.get_cluster_status.assert_called_once_with( + cluster_id=OPENSTACK_ID + ) + + def test_start_cluster(self): + master = MagicMock() + worker_instances = [MagicMock()] + self.handler.start_cluster( + public_key="pub", + master_instance=master, + worker_instances=worker_instances, + user=USERNAME, + ) + self.handler.bibigrid_connector.start_cluster.assert_called_once_with( + public_key="pub", + master_instance=master, + worker_instances=worker_instances, + user=USERNAME, + ) + + def test_terminate_cluster(self): + self.handler.terminate_cluster(cluster_id=OPENSTACK_ID) + self.handler.bibigrid_connector.terminate_cluster.assert_called_once_with( + cluster_id=OPENSTACK_ID + ) + + def test_add_cluster_machine(self): + self.handler.add_cluster_machine( + cluster_id=OPENSTACK_ID, + cluster_user=USERNAME, + cluster_group_id=["test"], + image_name=IMAGE.name, + flavor_name=FLAVOR.name, + name=NAME, + key_name=NAME, + batch_idx=1, + worker_idx=1, + ) + self.handler.openstack_connector.add_cluster_machine.assert_called_once_with( + cluster_id=OPENSTACK_ID, + cluster_user=USERNAME, + cluster_group_id=["test"], + image_name=IMAGE.name, + flavor_name=FLAVOR.name, + name=NAME, + key_name=NAME, + batch_idx=1, + worker_idx=1, + ) From 4b7eb244ab68b2d55f15cde1762c3afb2489eadc Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 4 Jan 2024 14:12:07 +0100 Subject: [PATCH 36/39] handler 100% --- simple_vm_client/VirtualMachineHandler.py | 158 +++++++++--------- .../test_virtualmachinehandler.py | 90 ++++++++++ 2 files changed, 166 insertions(+), 82 deletions(-) diff --git a/simple_vm_client/VirtualMachineHandler.py b/simple_vm_client/VirtualMachineHandler.py index 9e25c37..1e348a9 100644 --- a/simple_vm_client/VirtualMachineHandler.py +++ 
b/simple_vm_client/VirtualMachineHandler.py @@ -5,7 +5,6 @@ """ from __future__ import annotations -from typing import TYPE_CHECKING from simple_vm_client.bibigrid_connector.bibigrid_connector import BibigridConnector from simple_vm_client.forc_connector.forc_connector import ForcConnector @@ -13,23 +12,21 @@ from simple_vm_client.util import thrift_converter from simple_vm_client.util.logger import setup_custom_logger +from .ttypes import ( + VM, + Backend, + ClusterInfo, + ClusterInstance, + CondaPackage, + Flavor, + Image, + PlaybookResult, + ResearchEnvironmentTemplate, + Snapshot, + Volume, +) from .VirtualMachineService import Iface -if TYPE_CHECKING: - from ttypes import ( - VM, - Backend, - ClusterInfo, - ClusterInstance, - CondaPackage, - Flavor, - Image, - PlaybookResult, - ResearchEnvironmentTemplate, - Snapshot, - Volume, - ) - logger = setup_custom_logger(__name__) @@ -144,15 +141,12 @@ def get_server(self, openstack_id: str) -> VM: return server def get_servers(self) -> list[VM]: - servers = openstack_servers = self.openstack_connector.get_servers() - servers_full=[] + servers_full = [] for server in servers: servers_full.append(self.forc_connector.get_playbook_status(server=server)) - serv = thrift_converter.os_to_thrift_servers( - openstack_servers=servers - ) + serv = thrift_converter.os_to_thrift_servers(openstack_servers=servers) return servers_full def get_servers_by_ids(self, server_ids: list[str]) -> list[VM]: @@ -179,12 +173,12 @@ def get_forc_url(self) -> str: return self.forc_connector.get_forc_access_url() def create_snapshot( - self, - openstack_id: str, - name: str, - username: str, - base_tags: list[str], - description: str, + self, + openstack_id: str, + name: str, + username: str, + base_tags: list[str], + description: str, ) -> str: return self.openstack_connector.create_snapshot( openstack_id=openstack_id, @@ -198,7 +192,7 @@ def delete_image(self, image_id: str) -> None: return 
self.openstack_connector.delete_image(image_id=image_id) def create_volume( - self, volume_name: str, volume_storage: int, metadata: dict[str, str] + self, volume_name: str, volume_storage: int, metadata: dict[str, str] ) -> Volume: return thrift_converter.os_to_thrift_volume( openstack_volume=self.openstack_connector.create_volume( @@ -209,7 +203,7 @@ def create_volume( ) def create_volume_by_source_volume( - self, volume_name: str, metadata: dict[str, str], source_volume_id: str + self, volume_name: str, metadata: dict[str, str], source_volume_id: str ) -> Volume: return thrift_converter.os_to_thrift_volume( openstack_volume=self.openstack_connector.create_volume_by_source_volume( @@ -220,7 +214,7 @@ def create_volume_by_source_volume( ) def create_volume_by_volume_snap( - self, volume_name: str, metadata: dict[str, str], volume_snap_id: str + self, volume_name: str, metadata: dict[str, str], volume_snap_id: str ) -> Volume: return thrift_converter.os_to_thrift_volume( openstack_volume=self.openstack_connector.create_volume_by_volume_snap( @@ -231,7 +225,7 @@ def create_volume_by_volume_snap( ) def create_volume_snapshot( - self, volume_id: str, name: str, description: str + self, volume_id: str, name: str, description: str ) -> str: return self.openstack_connector.create_volume_snapshot( volume_id=volume_id, name=name, description=description @@ -256,7 +250,7 @@ def delete_volume(self, volume_id: str) -> None: return self.openstack_connector.delete_volume(volume_id=volume_id) def attach_volume_to_server( - self, openstack_id: str, volume_id: str + self, openstack_id: str, volume_id: str ) -> dict[str, str]: return self.openstack_connector.attach_volume_to_server( openstack_id=openstack_id, volume_id=volume_id @@ -266,7 +260,7 @@ def get_limits(self) -> dict[str, str]: return self.openstack_connector.get_limits() def create_backend( - self, owner: str, user_key_url: str, template: str, upstream_url: str + self, owner: str, user_key_url: str, template: str, 
upstream_url: str ) -> Backend: return self.forc_connector.create_backend( owner=owner, @@ -312,12 +306,12 @@ def delete_security_group_rule(self, openstack_id): ) def open_port_range_for_vm_in_project( - self, - range_start, - range_stop, - openstack_id, - ethertype: str = "IPv4", - protocol: str = "TCP", + self, + range_start, + range_stop, + openstack_id, + ethertype: str = "IPv4", + protocol: str = "TCP", ) -> str: return self.openstack_connector.open_port_range_for_vm_in_project( range_start=range_start, @@ -331,17 +325,17 @@ def add_udp_security_group(self, server_id: str) -> None: return self.openstack_connector.add_udp_security_group(server_id=server_id) def start_server( - self, - flavor_name: str, - image_name: str, - public_key: str, - servername: str, - metadata: dict[str, str], - volume_ids_path_new: list[dict[str, str]], - volume_ids_path_attach: list[dict[str, str]], - additional_keys: list[str], - research_environment: str, - additional_security_group_ids: list[str], + self, + flavor_name: str, + image_name: str, + public_key: str, + servername: str, + metadata: dict[str, str], + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_keys: list[str], + research_environment: str, + additional_security_group_ids: list[str], ) -> str: if research_environment: research_environment_metadata = ( @@ -365,15 +359,15 @@ def start_server( ) def start_server_with_custom_key( - self, - flavor_name: str, - image_name: str, - servername: str, - metadata: dict[str, str], - research_environment: str, - volume_ids_path_new: list[dict[str, str]], - volume_ids_path_attach: list[dict[str, str]], - additional_security_group_ids: list[str], + self, + flavor_name: str, + image_name: str, + servername: str, + metadata: dict[str, str], + research_environment: str, + volume_ids_path_new: list[dict[str, str]], + volume_ids_path_attach: list[dict[str, str]], + additional_security_group_ids: list[str], ) -> str: if 
research_environment: research_environment_metadata = ( @@ -399,14 +393,14 @@ def start_server_with_custom_key( return openstack_id def create_and_deploy_playbook( - self, - public_key: str, - openstack_id: str, - conda_packages: list[CondaPackage], - research_environment_template: str, - apt_packages: list[str], - create_only_backend: bool, - base_url: str = "", + self, + public_key: str, + openstack_id: str, + conda_packages: list[CondaPackage], + research_environment_template: str, + apt_packages: list[str], + create_only_backend: bool, + base_url: str = "", ) -> int: port = int( self.openstack_connector.get_vm_ports(openstack_id=openstack_id)["port"] @@ -436,11 +430,11 @@ def get_cluster_status(self, cluster_id: str) -> dict[str, str]: return self.bibigrid_connector.get_cluster_status(cluster_id=cluster_id) def start_cluster( - self, - public_key: str, - master_instance: ClusterInstance, - worker_instances: list[ClusterInstance], - user: str, + self, + public_key: str, + master_instance: ClusterInstance, + worker_instances: list[ClusterInstance], + user: str, ) -> dict[str, str]: return self.bibigrid_connector.start_cluster( public_key=public_key, @@ -453,16 +447,16 @@ def terminate_cluster(self, cluster_id: str) -> dict[str, str]: return self.bibigrid_connector.terminate_cluster(cluster_id=cluster_id) def add_cluster_machine( - self, - cluster_id: str, - cluster_user: str, - cluster_group_id: list[str], - image_name: str, - flavor_name: str, - name: str, - key_name: str, - batch_idx: int, - worker_idx: int, + self, + cluster_id: str, + cluster_user: str, + cluster_group_id: list[str], + image_name: str, + flavor_name: str, + name: str, + key_name: str, + batch_idx: int, + worker_idx: int, ) -> str: return self.openstack_connector.add_cluster_machine( cluster_id=cluster_id, diff --git a/simple_vm_client/test_virtualmachinehandler.py b/simple_vm_client/test_virtualmachinehandler.py index 095c57b..060b23e 100644 --- 
a/simple_vm_client/test_virtualmachinehandler.py +++ b/simple_vm_client/test_virtualmachinehandler.py @@ -424,6 +424,36 @@ def test_add_udp_security_group(self): server_id=OPENSTACK_ID ) + def test_start_server_with_res(self): + self.handler.forc_connector.get_metadata_by_research_environment.return_value = ( + "res_metadata" + ) + + self.handler.start_server( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + public_key="pub", + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + additional_keys=[], + research_environment="de", + additional_security_group_ids=[], + ) + self.handler.openstack_connector.start_server.assert_called_once_with( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + public_key="pub", + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + additional_keys=[], + research_environment_metadata="res_metadata", + additional_security_group_ids=[], + ) + def test_start_server(self): self.handler.start_server( flavor_name=FLAVOR.name, @@ -479,6 +509,38 @@ def test_start_server_with_custom_key(self): openstack_id=SERVER.id, private_key="priv", name=SERVER.name ) + def test_start_server_with_custom_key_and_res(self): + self.handler.openstack_connector.start_server_with_playbook.return_value = ( + SERVER.id, + "priv", + ) + self.handler.forc_connector.get_metadata_by_research_environment.return_value = ( + "res_metadata" + ) + self.handler.start_server_with_custom_key( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + research_environment="de", + additional_security_group_ids=[], + ) + self.handler.openstack_connector.start_server_with_playbook.assert_called_once_with( + flavor_name=FLAVOR.name, + image_name=IMAGE.name, + servername=SERVER.name, + metadata=METADATA, + volume_ids_path_new=[], + volume_ids_path_attach=[], + 
additional_security_group_ids=[], + research_environment_metadata="res_metadata", + ) + self.handler.forc_connector.set_vm_wait_for_playbook.assert_called_once_with( + openstack_id=SERVER.id, private_key="priv", name=SERVER.name + ) + def test_create_and_deploy_playbook(self): self.handler.openstack_connector.get_vm_ports.return_value = { "port": str(20), @@ -577,3 +639,31 @@ def test_add_cluster_machine(self): batch_idx=1, worker_idx=1, ) + + def test_keyboard_interrupt_handler_playbooks(self): + mock_stop_a = MagicMock() + mock_stop_b = MagicMock() + mock_stop_c = MagicMock() + + self.handler.forc_connector._active_playbooks = { + "a": mock_stop_a, + "b": mock_stop_b, + "c": mock_stop_c, + } + self.handler.forc_connector.redis_connection.hget.side_effect = [ + "a".encode("utf-8"), + "b".encode("utf-8"), + "c".encode("utf-8"), + ] + with self.assertRaises(SystemExit): + self.handler.keyboard_interrupt_handler_playbooks() + for key in self.handler.forc_connector._active_playbooks.keys(): + self.handler.openstack_connector.delete_keypair.assert_any_call( + key_name=key + ) + self.handler.openstack_connector.delete_server.assert_any_call( + openstack_id=key + ) + mock_stop_a.stop.assert_called_once() + mock_stop_b.stop.assert_called_once() + mock_stop_c.stop.assert_called_once() From a84a3df7193de019f55bf4620179a66e8b67d356 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 4 Jan 2024 15:35:11 +0100 Subject: [PATCH 37/39] done --- .coveragerc | 4 + simple_vm_client/VirtualMachineServer.py | 11 - .../forc_connector/forc_connector.py | 42 +- .../forc_connector/test_forc_connector.py | 626 ++++++++++++++++-- 4 files changed, 591 insertions(+), 92 deletions(-) diff --git a/.coveragerc b/.coveragerc index 02e7226..132dc8a 100644 --- a/.coveragerc +++ b/.coveragerc @@ -6,6 +6,10 @@ omit = simple_vm_client/ttypes.py simple_vm_client/forc_connector/template/test_templates.py simple_vm_client/util/logger.py + simple_vm_client/forc_connector/test_forc_connector.py + 
simple_vm_client/VirtualMachineServer.py + + check_env.py [report] exclude_lines = diff --git a/simple_vm_client/VirtualMachineServer.py b/simple_vm_client/VirtualMachineServer.py index c797e2e..3820dcc 100644 --- a/simple_vm_client/VirtualMachineServer.py +++ b/simple_vm_client/VirtualMachineServer.py @@ -1,4 +1,3 @@ -import os import signal import ssl import sys @@ -73,15 +72,5 @@ def catch_shutdown(signal: int, frame: object) -> None: server.serve() -def check_environment_variables(envs: list[str]) -> None: - def check_env(var: str) -> None: - if var not in os.environ: - click.echo(f"ERROR: There is no {var} set in environment.") - click.echo("Please make sure you have sourced your OpenStack rc file") - sys.exit() - - list(map(lambda var: check_env(var), envs)) - - if __name__ == "__main__": startServer() diff --git a/simple_vm_client/forc_connector/forc_connector.py b/simple_vm_client/forc_connector/forc_connector.py index 1ee96b9..962a5e0 100644 --- a/simple_vm_client/forc_connector/forc_connector.py +++ b/simple_vm_client/forc_connector/forc_connector.py @@ -130,11 +130,12 @@ def delete_backend(self, backend_id: str) -> None: if response.status_code: if response.status_code == 404 or response.status_code == 500: try: + response_data = response.json() raise BackendNotFoundException( - message=str(json.dumps(response.json())), + message=str(json.dumps(response_data)), name_or_id=str(backend_id), ) - except json.JSONDecodeError: + except ValueError: logger.exception(str(response.content)) raise BackendNotFoundException( message=str(response.content), name_or_id=str(backend_id) @@ -146,14 +147,11 @@ def delete_backend(self, backend_id: str) -> None: def add_user_to_backend(self, backend_id: str, user_id: str) -> dict[str, str]: logger.info(f"Add User {user_id} to backend {backend_id}") - try: - post_url = f"{self.FORC_URL}users/{backend_id}" - user_info = { - "user": user_id, - } - except Exception as e: - logger.exception(e) - return {"Error": "Could not create 
url or json body."} + post_url = f"{self.FORC_URL}users/{backend_id}" + user_info = { + "user": user_id, + } + try: response = requests.post( post_url, @@ -177,7 +175,9 @@ def add_user_to_backend(self, backend_id: str, user_id: str) -> dict[str, str]: logger.exception(e) raise BackendNotFoundException(message=str(e), name_or_id=backend_id) - def create_backend(self, owner: str, user_key_url: str, template: str, upstream_url: str) -> Backend: + def create_backend( + self, owner: str, user_key_url: str, template: str, upstream_url: str + ) -> Backend: logger.info( f"Create Backend - [Owner:{owner}, user_key_url:{user_key_url}, template:{template}, upstream_url:{upstream_url}" ) @@ -362,19 +362,17 @@ def load_env(self) -> None: logger.info("Load env: FORC") self.FORC_API_KEY = os.environ.get("FORC_API_KEY", None) - def get_playbook_logs(self, openstack_id: str) -> PlaybookResult: - logger.warning(f"Get Playbook logs {openstack_id}") - if ( + def is_playbook_active(self, openstack_id: str) -> bool: + return ( self.redis_connection.exists(openstack_id) == 1 and openstack_id in self._active_playbooks - ): - playbook = self._active_playbooks.get(openstack_id) - logger.warning(f"playbook {playbook}") - if not playbook: - raise PlaybookNotFoundException( - message=f"No active Playbook found for {openstack_id}!", - name_or_id=openstack_id, - ) + ) + + def get_playbook_logs(self, openstack_id: str) -> PlaybookResult: + logger.warning(f"Get Playbook logs {openstack_id}") + + if self.is_playbook_active(openstack_id): + playbook = self._active_playbooks[openstack_id] status, stdout, stderr = playbook.get_logs() logger.warning(f" Playbook logs {openstack_id} status: {status}") diff --git a/simple_vm_client/forc_connector/test_forc_connector.py b/simple_vm_client/forc_connector/test_forc_connector.py index 5a75231..e0ea849 100644 --- a/simple_vm_client/forc_connector/test_forc_connector.py +++ b/simple_vm_client/forc_connector/test_forc_connector.py @@ -1,15 +1,20 @@ -import json 
import os import tempfile import unittest -from unittest.mock import patch, MagicMock +from unittest.mock import MagicMock, patch import requests -from openstack.test import fakes from openstack.compute.v2.server import Server +from openstack.test import fakes from simple_vm_client.forc_connector.forc_connector import ForcConnector -from simple_vm_client.ttypes import BackendNotFoundException, DefaultException +from simple_vm_client.ttypes import ( + Backend, + BackendNotFoundException, + DefaultException, + PlaybookNotFoundException, + TemplateNotFoundException, +) from simple_vm_client.util.state_enums import VmTaskStates FORC_URL = "https://proxy-dev.bi.denbi.de:5000/" @@ -33,7 +38,6 @@ class TestForcConnector(unittest.TestCase): - @patch("simple_vm_client.forc_connector.forc_connector.redis.ConnectionPool") @patch("simple_vm_client.forc_connector.forc_connector.redis.Redis") @patch("simple_vm_client.forc_connector.forc_connector.Template") @@ -64,10 +68,10 @@ def test_init(self, mock_template, mock_redis, mock_connection_pool): forc_url=FORC_URL, forc_api_key=FORC_API_KEY, ) - mock_connection_pool.assert_called_with( - host=REDIS_HOST, port=REDIS_PORT + mock_connection_pool.assert_called_with(host=REDIS_HOST, port=REDIS_PORT) + mock_redis.assert_called_with( + connection_pool=mock_connection_pool.return_value, charset="utf-8" ) - mock_redis.assert_called_with(connection_pool=mock_connection_pool.return_value, charset="utf-8") def test_load_config(self): with tempfile.NamedTemporaryFile(mode="w+", delete=False) as temp_file: @@ -86,10 +90,16 @@ def test_load_config(self): @patch("simple_vm_client.forc_connector.forc_connector.redis.Redis") @patch("simple_vm_client.forc_connector.forc_connector.logger.info") @patch("simple_vm_client.forc_connector.forc_connector.logger.error") - def test_connect_to_redis(self, mock_logger_error, mock_logger_info, mock_redis, mock_redis_pool): + def test_connect_to_redis( + self, mock_logger_error, mock_logger_info, mock_redis, 
mock_redis_pool + ): self.forc_connector.connect_to_redis() - mock_redis_pool.assert_any_call(host=self.forc_connector.REDIS_HOST, port=self.forc_connector.REDIS_PORT) - mock_redis.asser_called_once_with(connection_pool=self.forc_connector.redis_pool, charset="utf-8") + mock_redis_pool.assert_any_call( + host=self.forc_connector.REDIS_HOST, port=self.forc_connector.REDIS_PORT + ) + mock_redis.asser_called_once_with( + connection_pool=self.forc_connector.redis_pool, charset="utf-8" + ) self.forc_connector.redis_connection.ping.return_value = True self.forc_connector.redis_connection.ping.assert_any_call() mock_logger_info.assert_any_call("Redis connection created!") @@ -105,7 +115,12 @@ def test_get_users_from_backend(self, mock_get): return_value.json.return_value = "data" mock_get.return_value = return_value result = self.forc_connector.get_users_from_backend(backend_id) - mock_get.assert_called_once_with(get_url, timeout=(30, 30), headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, verify=True) + mock_get.assert_called_once_with( + get_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) self.assertEqual(result, ["data"]) @patch("simple_vm_client.forc_connector.forc_connector.requests.get") @@ -115,7 +130,12 @@ def test_get_users_from_backend_401(self, mock_get): return_value = MagicMock(status_code=401, body={"data"}) mock_get.return_value = return_value result = self.forc_connector.get_users_from_backend(backend_id) - mock_get.assert_called_once_with(get_url, timeout=(30, 30), headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, verify=True) + mock_get.assert_called_once_with( + get_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) self.assertEqual(result, ["Error: 401"]) @patch("simple_vm_client.forc_connector.forc_connector.requests.get") @@ -125,7 +145,12 @@ def test_get_users_from_backend_timeout(self, mock_get): mock_get.side_effect = 
requests.Timeout("UNit Test") result = self.forc_connector.get_users_from_backend(backend_id) - mock_get.assert_called_once_with(get_url, timeout=(30, 30), headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, verify=True) + mock_get.assert_called_once_with( + get_url, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) self.assertEqual(result, []) @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") @@ -192,13 +217,25 @@ def test_delete_user_from_backend_exception(self, mock_delete): verify=True, ) + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") + @patch("simple_vm_client.forc_connector.forc_connector.json") + def test_delete_backend_not_found_json(self, mock_json, mock_delete): + backend_id = "backend_id" + return_value = MagicMock(status_code=404) + return_value.json.return_value = {"data": "success"} + mock_json.dumps.side_effect = ValueError() + mock_delete.return_value = return_value + + with self.assertRaises(BackendNotFoundException): + self.forc_connector.delete_backend(backend_id) + @patch("simple_vm_client.forc_connector.forc_connector.requests.delete") def test_delete_backend(self, mock_delete): backend_id = "backend_id" delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" return_value = MagicMock(status_code=200) - return_value.json.return_value={"data": "success"} + return_value.json.return_value = {"data": "success"} mock_delete.return_value = return_value self.forc_connector.delete_backend(backend_id) @@ -216,7 +253,7 @@ def test_delete_backend_not_found(self, mock_delete): delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" return_value = MagicMock(status_code=404) - return_value.json.return_value={"error": "Backend not found"} + return_value.json.return_value = {"error": "Backend not found"} mock_delete.return_value = return_value with self.assertRaises(BackendNotFoundException): @@ -235,7 +272,7 @@ def 
test_delete_backend_server_error(self, mock_delete): delete_url = f"{self.forc_connector.FORC_URL}backends/{backend_id}" return_value = MagicMock(status_code=500) - return_value.json.return_value={"error": "Internal Server Error"} + return_value.json.return_value = {"error": "Internal Server Error"} mock_delete.return_value = return_value with self.assertRaises(BackendNotFoundException) as context: @@ -265,6 +302,15 @@ def test_delete_backend_timeout(self, mock_delete): verify=True, ) + @patch("simple_vm_client.forc_connector.forc_connector.requests.post") + def test_add_user_to_backend_backend_not_found(self, mock_post): + mock_response = MagicMock() + + mock_response.json.side_effect = Exception() + mock_post.return_value = mock_response + with self.assertRaises(BackendNotFoundException): + self.forc_connector.add_user_to_backend(backend_id="test", user_id="test") + @patch("simple_vm_client.forc_connector.forc_connector.requests.post") def test_add_user_to_backend(self, mock_post): # Create an instance of your class @@ -274,7 +320,9 @@ def test_add_user_to_backend(self, mock_post): mock_post.return_value = mock_response # Call the method you want to test - result = self.forc_connector.add_user_to_backend(backend_id="backend_id", user_id="user_id") + result = self.forc_connector.add_user_to_backend( + backend_id="backend_id", user_id="user_id" + ) # Assertions mock_post.assert_called_once_with( @@ -290,7 +338,9 @@ def test_add_user_to_backend(self, mock_post): def test_add_user_to_backend_timeout(self, mock_post): mock_post.side_effect = requests.Timeout("Unit Test") - result = self.forc_connector.add_user_to_backend(backend_id="backend_id", user_id="user_id") + result = self.forc_connector.add_user_to_backend( + backend_id="backend_id", user_id="user_id" + ) mock_post.assert_called_once_with( f"{self.forc_connector.FORC_URL}users/backend_id", @@ -305,9 +355,10 @@ def test_add_user_to_backend_timeout(self, mock_post): def test_add_user_to_backend_exception(self, 
mock_post): mock_post.side_effect = Exception("Unit Test") - with self.assertRaises(BackendNotFoundException): - self.forc_connector.add_user_to_backend(backend_id="backend_id", user_id="user_id") + self.forc_connector.add_user_to_backend( + backend_id="backend_id", user_id="user_id" + ) mock_post.assert_called_once_with( f"{self.forc_connector.FORC_URL}users/backend_id", @@ -326,7 +377,7 @@ def test_get_forc_url(self): self.assertEqual(result, self.forc_connector.FORC_URL) - def get_forc_access_url(self): + def test_get_forc_access_url(self): result = self.forc_connector.get_forc_access_url() self.assertEqual(result, self.forc_connector.FORC_ACCESS_URL) @@ -345,45 +396,502 @@ def test_set_vm_wait_for_playbook(self): openstack_id = "openstack_id" private_key = "priv" name = "name" - self.forc_connector.set_vm_wait_for_playbook(openstack_id=openstack_id, private_key=private_key, name=name) - self.forc_connector.redis_connection.hset.assert_called_once_with(name=openstack_id, - mapping=dict( - key=private_key, - name=name, - status=VmTaskStates.PREPARE_PLAYBOOK_BUILD.value, - ), ) + self.forc_connector.set_vm_wait_for_playbook( + openstack_id=openstack_id, private_key=private_key, name=name + ) + self.forc_connector.redis_connection.hset.assert_called_once_with( + name=openstack_id, + mapping=dict( + key=private_key, + name=name, + status=VmTaskStates.PREPARE_PLAYBOOK_BUILD.value, + ), + ) def test_get_playbook_status(self): - fake_server=fakes.generate_fake_resource(Server) - fake_server.task_state=None - fake_playbook=MagicMock() - self.forc_connector._active_playbooks[fake_server.id]=fake_playbook - self.forc_connector.redis_connection.exists.return_value=1 - self.forc_connector.redis_connection.hget.return_value=VmTaskStates.PREPARE_PLAYBOOK_BUILD.value.encode("utf-8") - result=self.forc_connector.get_playbook_status(server=fake_server) - self.assertEqual(result.task_state,VmTaskStates.PREPARE_PLAYBOOK_BUILD.value) - 
self.forc_connector.redis_connection.hget.return_value=VmTaskStates.BUILD_PLAYBOOK.value.encode("utf-8") - result=self.forc_connector.get_playbook_status(server=fake_server) - self.assertEqual(result.task_state,VmTaskStates.BUILD_PLAYBOOK.value) - self.forc_connector.redis_connection.hget.return_value=VmTaskStates.PLAYBOOK_FAILED.value.encode("utf-8") - result=self.forc_connector.get_playbook_status(server=fake_server) - self.assertEqual(result.task_state,VmTaskStates.PLAYBOOK_FAILED.value) - self.forc_connector.redis_connection.hget.return_value=VmTaskStates.PLAYBOOK_SUCCESSFUL.value.encode("utf-8") - result=self.forc_connector.get_playbook_status(server=fake_server) - self.assertEqual(result.task_state,VmTaskStates.PLAYBOOK_SUCCESSFUL.value) - + fake_server = fakes.generate_fake_resource(Server) + fake_server.task_state = None + fake_playbook = MagicMock() + self.forc_connector._active_playbooks[fake_server.id] = fake_playbook + self.forc_connector.redis_connection.exists.return_value = 1 + self.forc_connector.redis_connection.hget.return_value = ( + VmTaskStates.PREPARE_PLAYBOOK_BUILD.value.encode("utf-8") + ) + result = self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state, VmTaskStates.PREPARE_PLAYBOOK_BUILD.value) + self.forc_connector.redis_connection.hget.return_value = ( + VmTaskStates.BUILD_PLAYBOOK.value.encode("utf-8") + ) + result = self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state, VmTaskStates.BUILD_PLAYBOOK.value) + self.forc_connector.redis_connection.hget.return_value = ( + VmTaskStates.PLAYBOOK_FAILED.value.encode("utf-8") + ) + result = self.forc_connector.get_playbook_status(server=fake_server) + self.assertEqual(result.task_state, VmTaskStates.PLAYBOOK_FAILED.value) + self.forc_connector.redis_connection.hget.return_value = ( + VmTaskStates.PLAYBOOK_SUCCESSFUL.value.encode("utf-8") + ) + result = self.forc_connector.get_playbook_status(server=fake_server) 
+ self.assertEqual(result.task_state, VmTaskStates.PLAYBOOK_SUCCESSFUL.value) @patch("simple_vm_client.forc_connector.forc_connector.Playbook") - def test_create_and_deploy_playbook(self,mock_playbook): - key="key" - openstack_id="openstack_id" - playbook_mock=MagicMock() - mock_playbook.return_value=playbook_mock - - self.forc_connector.redis_connection.hget.return_value=key.encode("utf-8") - res=self.forc_connector.create_and_deploy_playbook(public_key=key,research_environment_template="vscode",create_only_backend=False,conda_packages=[],apt_packages=[],openstack_id=openstack_id,port=80,ip="192.168.0.1",cloud_site="Bielefeld",base_url="base_url") - self.forc_connector.redis_connection.hset.assert_called_once_with(openstack_id,"status",VmTaskStates.BUILD_PLAYBOOK.value) - self.assertEqual(res,0) - active_play=self.forc_connector._active_playbooks[openstack_id] - self.assertEqual(active_play,playbook_mock) + def test_create_and_deploy_playbook(self, mock_playbook): + key = "key" + openstack_id = "openstack_id" + playbook_mock = MagicMock() + mock_playbook.return_value = playbook_mock + + self.forc_connector.redis_connection.hget.return_value = key.encode("utf-8") + res = self.forc_connector.create_and_deploy_playbook( + public_key=key, + research_environment_template="vscode", + create_only_backend=False, + conda_packages=[], + apt_packages=[], + openstack_id=openstack_id, + port=80, + ip="192.168.0.1", + cloud_site="Bielefeld", + base_url="base_url", + ) + self.forc_connector.redis_connection.hset.assert_called_once_with( + openstack_id, "status", VmTaskStates.BUILD_PLAYBOOK.value + ) + self.assertEqual(res, 0) + active_play = self.forc_connector._active_playbooks[openstack_id] + self.assertEqual(active_play, playbook_mock) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.post") + @patch("simple_vm_client.forc_connector.forc_connector.Backend") + def test_create_backend(self, mock_backend, mock_post): + # Arrange + owner = "test_owner" + 
user_key_url = "test_key_url" + template = "test_template" + upstream_url = "test_upstream_url" + self.forc_connector.template.get_template_version_for.return_value = ( + "test_version" + ) + + mock_response = MagicMock() + mock_response.json.return_value = { + "id": 1, + "owner": owner, + "location_url": "test_location_url", + "template": template, + "template_version": "test_version", + } + mock_post.return_value = mock_response + + # Act + result = self.forc_connector.create_backend( + owner, user_key_url, template, upstream_url + ) + + # Assert + mock_post.assert_called_once_with( + f"{self.forc_connector.FORC_URL}backends", + json={ + "owner": owner, + "user_key_url": user_key_url, + "template": template, + "template_version": "test_version", + "upstream_url": upstream_url, + }, + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + mock_response.json.assert_called_once() + mock_backend.assert_called_once_with( + id=1, + owner=owner, + location_url="test_location_url", + template=template, + template_version="test_version", + ) + + self.assertEqual(result, mock_backend.return_value) + + @patch( + "simple_vm_client.forc_connector.forc_connector.requests.post", + side_effect=requests.Timeout, + ) + def test_create_backend_timeout(self, mock_post): + # Arrange + owner = "test_owner" + user_key_url = "test_key_url" + template = "test_template" + upstream_url = "test_upstream_url" + + # Act & Assert + with self.assertRaises(DefaultException): + self.forc_connector.create_backend( + owner, user_key_url, template, upstream_url + ) + + mock_post.assert_called_once() + + @patch( + "simple_vm_client.forc_connector.forc_connector.requests.post", + side_effect=Exception("Test error"), + ) + def test_create_backend_exception(self, mock_post): + # Arrange + owner = "test_owner" + user_key_url = "test_key_url" + template = "test_template" + upstream_url = "test_upstream_url" + + # Act & Assert + with 
self.assertRaises(DefaultException): + self.forc_connector.create_backend( + owner, user_key_url, template, upstream_url + ) + + mock_post.assert_called_once() + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backends(self, mock_get): + # Arrange + + mock_response = MagicMock() + mock_response.json.return_value = [ + { + "id": 1, + "owner": "test_owner", + "location_url": "test_location_url", + "template": "test_template", + "template_version": "test_version", + }, + { + "id": 2, + "owner": "another_owner", + "location_url": "another_location_url", + "template": "another_template", + "template_version": "another_version", + }, + ] + mock_get.return_value = mock_response + + # Act + result = self.forc_connector.get_backends() + + # Assert + mock_get.assert_called_once_with( + f"{self.forc_connector.FORC_URL}backends", + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + mock_response.json.assert_called_once() + + expected_backends = [ + Backend( + id=1, + owner="test_owner", + location_url="test_location_url", + template="test_template", + template_version="test_version", + ), + Backend( + id=2, + owner="another_owner", + location_url="another_location_url", + template="another_template", + template_version="another_version", + ), + ] + + self.assertEqual(result, expected_backends) + + @patch( + "simple_vm_client.forc_connector.forc_connector.requests.get", + side_effect=requests.Timeout, + ) + def test_get_backends_timeout(self, mock_get): + # Arrange + + # Act & Assert + with self.assertRaises(DefaultException): + self.forc_connector.get_backends() + + mock_get.assert_called_once() + + def test_is_playbook_active(self) -> bool: + openstack_id = "openstack_id" + self.forc_connector.is_playbook_active(openstack_id=openstack_id) + self.forc_connector.redis_connection.exists.assert_called_once_with( + openstack_id + ) + + def test_get_playbook_logs(self): + openstack_id = 
"openstack_id" + self.forc_connector.redis_connection.exists.return_value = 1 + playbook_mock = MagicMock() + playbook_mock.get_logs.return_value = "status", "stdout", "stderr" + self.forc_connector._active_playbooks = {openstack_id: playbook_mock} + self.forc_connector.get_playbook_logs(openstack_id=openstack_id) + self.forc_connector.redis_connection.exists.assert_called_once_with( + openstack_id + ) + playbook_mock.get_logs.assert_called_once() + playbook_mock.cleanup.assert_called_once() + + def test_get_playbook_logs_no_playbook(self): + openstack_id = "openstack_id" + with self.assertRaises(PlaybookNotFoundException): + self.forc_connector.get_playbook_logs(openstack_id=openstack_id) + self.forc_connector.redis_connection.exists.assert_called_once_with( + openstack_id + ) + + def test_create_backend_template_exc(self): + self.forc_connector.template.get_template_version_for.return_value = None + with self.assertRaises(TemplateNotFoundException): + self.forc_connector.create_backend( + owner="dede", + user_key_url="dede", + template="not_found", + upstream_url="de", + ) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backend_sexc(self, mock_get): + mock_response = MagicMock(status_code=401) + mock_get.return_value = mock_response + with self.assertRaises(DefaultException): + self.forc_connector.get_backends() + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backends_by_template_exc(self, mock_get): + mock_response = MagicMock(status_code=401) + mock_get.return_value = mock_response + with self.assertRaises(DefaultException): + self.forc_connector.get_backends_by_template(template="ds") + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backends_by_template(self, mock_get): + # Arrange + template = "test_template" + + mock_response = MagicMock() + mock_response.json.return_value = [ + { + "id": 1, + "owner": "test_owner", + "location_url": 
"test_location_url", + "template": template, + "template_version": "test_version", + }, + { + "id": 2, + "owner": "another_owner", + "location_url": "another_location_url", + "template": template, + "template_version": "another_version", + }, + ] + mock_get.return_value = mock_response + + # Act + result = self.forc_connector.get_backends_by_template(template) + + # Assert + mock_get.assert_called_once_with( + f"{self.forc_connector.FORC_URL}backends/byTemplate/{template}", + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + mock_response.json.assert_called_once() + + expected_backends = [ + Backend( + id=1, + owner="test_owner", + location_url="test_location_url", + template=template, + template_version="test_version", + ), + Backend( + id=2, + owner="another_owner", + location_url="another_location_url", + template=template, + template_version="another_version", + ), + ] + + self.assertEqual(result, expected_backends) + + @patch( + "simple_vm_client.forc_connector.forc_connector.requests.get", + side_effect=requests.Timeout, + ) + def test_get_backends_by_template_timeout(self, mock_get): + # Arrange + template = "test_template" + + # Act & Assert + with self.assertRaises(DefaultException): + self.forc_connector.get_backends_by_template(template) + + mock_get.assert_called_once() + + def test_get_metadata_by_research_environment(self): + metadata_Mock = MagicMock() + res_env = "testres" + + return_value = {res_env: metadata_Mock} + template_mock = MagicMock() + template_mock.loaded_research_env_metadata = {res_env: metadata_Mock} + self.forc_connector.template = template_mock + self.forc_connector.template.loaded_research_env_metadata = return_value + res = self.forc_connector.get_metadata_by_research_environment( + research_environment=res_env + ) + self.assertEqual(metadata_Mock, res) + + def test_get_metadata_by_research_environment_none(self): + res_env = "testres" + template_mock = MagicMock() + 
template_mock.loaded_research_env_metadata = None + res = self.forc_connector.get_metadata_by_research_environment( + research_environment=res_env + ) + self.assertEqual(res, None) + + def test_get_metadata_by_research_environment_none_two(self): + metadata_Mock = MagicMock() + + template_mock = MagicMock() + template_mock.loaded_research_env_metadata = {"dede": metadata_Mock} + self.forc_connector.template = template_mock + res = self.forc_connector.get_metadata_by_research_environment( + research_environment="user_key_url" + ) + self.assertEqual(None, None) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backend_by_id_exc(self, mock_get): + backend_id = "your_backend_id" + mock_response = MagicMock() + mock_response.json.side_effect = Exception() + mock_get.return_value = mock_response + + with self.assertRaises(Exception): + self.forc_connector.get_backend_by_id(backend_id) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backend_by_id(self, mock_get): + backend_id = "your_backend_id" + mock_response = MagicMock() + mock_response.json.return_value = { + "id": backend_id, + "owner": "owner", + "location_url": "location_url", + "template": "template", + "template_version": "template_version", + } + mock_get.return_value = mock_response + + result = self.forc_connector.get_backend_by_id(backend_id) + + mock_get.assert_called_once_with( + f"{self.forc_connector.FORC_URL}backends/{backend_id}", + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + expected_backend = Backend( + id=backend_id, + owner="owner", + location_url="location_url", + template="template", + template_version="template_version", + ) + + self.assertEqual(result, expected_backend) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backend_by_id_timeout(self, mock_get): + backend_id = "your_backend_id" + mock_get.side_effect = 
requests.Timeout("Unit Test Timeout") + + with self.assertRaises(DefaultException) as context: + self.forc_connector.get_backend_by_id(backend_id) + + self.assertIn("Unit Test Timeout", str(context.exception)) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backends_by_owner_default_exc(self, mock_get): + mock_response = MagicMock(status_code=401) + mock_get.return_value = mock_response + with self.assertRaises(DefaultException): + self.forc_connector.get_backends_by_owner(owner="user") + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backends_by_owner(self, mock_get): + owner = "your_owner" + mock_response = MagicMock() + mock_response.json.return_value = [ + { + "id": "backend_id_1", + "owner": owner, + "location_url": "location_url_1", + "template": "template_1", + "template_version": "template_version_1", + }, + { + "id": "backend_id_2", + "owner": owner, + "location_url": "location_url_2", + "template": "template_2", + "template_version": "template_version_2", + }, + ] + mock_get.return_value = mock_response + + result = self.forc_connector.get_backends_by_owner(owner) + + mock_get.assert_called_once_with( + f"{self.forc_connector.FORC_URL}backends/byOwner/{owner}", + timeout=(30, 30), + headers={"X-API-KEY": self.forc_connector.FORC_API_KEY}, + verify=True, + ) + + expected_backends = [ + Backend( + id="backend_id_1", + owner=owner, + location_url="location_url_1", + template="template_1", + template_version="template_version_1", + ), + Backend( + id="backend_id_2", + owner=owner, + location_url="location_url_2", + template="template_2", + template_version="template_version_2", + ), + ] + + self.assertEqual(result, expected_backends) + + @patch("simple_vm_client.forc_connector.forc_connector.requests.get") + def test_get_backends_by_owner_timeout(self, mock_get): + owner = "your_owner" + mock_get.side_effect = requests.Timeout("Unit Test Timeout") + + with 
self.assertRaises(DefaultException) as context: + self.forc_connector.get_backends_by_owner(owner) + self.assertIn("Unit Test Timeout", str(context.exception)) From 0affae481d8ee13e5440247c126ff88a30e61b02 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Thu, 4 Jan 2024 16:15:58 +0100 Subject: [PATCH 38/39] fixed missing --- simple_vm_client/config/config_giessen.yml | 58 +++++++++++++++++++ .../forc_connector/template/template.py | 2 +- 2 files changed, 59 insertions(+), 1 deletion(-) create mode 100644 simple_vm_client/config/config_giessen.yml diff --git a/simple_vm_client/config/config_giessen.yml b/simple_vm_client/config/config_giessen.yml new file mode 100644 index 0000000..57735d5 --- /dev/null +++ b/simple_vm_client/config/config_giessen.yml @@ -0,0 +1,58 @@ +redis: + host: simplevm_client_giessen_redis + port: 6379 + password: "" + +production: False + +logger: + level: INFO + file: log/portal_client.log + file_backup_count: 5 + max_bytes: 1073741824 + +server: + threads: 30 + host: 0.0.0.0 + port: 9090 + # If you use docker-compose this path needs to be the path you mount the server.pem into + certfile: /code/VirtualMachineService/keys/server.pem + use_ssl: False + +openstack: + cloud_site: giessen + gateway_security_group_id: b9ccddab-7e17-4df5-8282-48fd593500a8 + host: 0.0.0.0 + # Client Port + port: 9090 + # Gateway IP + gateway_ip: 134.176.27.238 + + # If set to True the client will use a Gateway instead of providing floating IPs for each instance. 
+ use_gateway: True + + set_password: False + # network where the project is located + network: SimpleVMGieTest-Netzwerk + + # subnetwork for starting Clusters + sub_network: SimpleVMGieTest-Subnetz + + ssh_port_calculation: 30000 + x + y * 256 + udp_port_calculation: 30000 + x + y * 256 + +cloud_site: giessen + + +bibigrid: + # Url for Bibigrid API + port: 8080 + host: simplevm_bibigrid + https: False + sub_network: portalexternalsubnetwork + modes: + - slurm +forc: + forc_url: https://proxy-dev.gi.denbi.de:4443/ + github_playbooks_repo: https://github.com/deNBI/resenvs/archive/refs/heads/staging.zip + forc_security_group_id: 5adb9f48-1e57-463b-9742-573ba2dabae2 diff --git a/simple_vm_client/forc_connector/template/template.py b/simple_vm_client/forc_connector/template/template.py index 0d1b2b4..2fb191f 100644 --- a/simple_vm_client/forc_connector/template/template.py +++ b/simple_vm_client/forc_connector/template/template.py @@ -26,7 +26,7 @@ DIRECTION = "direction" PROTOCOL = "protocol" INFORMATION_FOR_DISPLAY = "information_for_display" -NO_TEMPLATE_NAMES = ["packer", "optional", ".github", "cluster"] +NO_TEMPLATE_NAMES = ["packer", "optional", ".github", "cluster", "conda"] NEEDS_FORC_SUPPORT = "needs_forc_support" MIN_RAM = "min_ram" MIN_CORES = "min_cores" From c55be14bc8fbfa55a23f7b89dec499a8f5225782 Mon Sep 17 00:00:00 2001 From: dweinholz Date: Fri, 5 Jan 2024 10:01:58 +0100 Subject: [PATCH 39/39] merged dev --- simple_vm_client/VirtualMachineService.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/simple_vm_client/VirtualMachineService.py b/simple_vm_client/VirtualMachineService.py index af027de..091e974 100644 --- a/simple_vm_client/VirtualMachineService.py +++ b/simple_vm_client/VirtualMachineService.py @@ -10,11 +10,10 @@ from thrift.protocol.TProtocol import TProtocolException from thrift.TRecursive import fix_spec -from .ttypes import * import sys import logging -from ttypes import * +from .ttypes import * from thrift.Thrift 
import TProcessor from thrift.transport import TTransport all_structs = []