Skip to content

Commit

Permalink
azure: Add support for cleaning up Image Galleries
Browse files Browse the repository at this point in the history
  • Loading branch information
ricardobranco777 committed Jul 26, 2023
1 parent 1a60608 commit efa86c7
Show file tree
Hide file tree
Showing 2 changed files with 106 additions and 44 deletions.
56 changes: 39 additions & 17 deletions ocw/lib/azure.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
from azure.storage.blob import BlobServiceClient
from azure.core.exceptions import ResourceNotFoundError
from msrest.exceptions import AuthenticationError
from dateutil.parser import parse
from webui.PCWConfig import PCWConfig
from .provider import Provider
from ..models import Instance
Expand Down Expand Up @@ -96,6 +97,9 @@ def get_vm_types_in_resource_group(self, resource_group: str) -> str:
return ', '.join(type_set)
return "N/A"

def get_resource_properties(self, resource_id, api_version: str = "2023-07-03"):
    """Return the ``properties`` of an arbitrary Azure resource.

    Args:
        resource_id: fully qualified Azure resource id.
        api_version: ARM API version to query with; defaults to the version
            this code was originally pinned to, so existing callers are
            unaffected.

    Returns:
        The ``properties`` attribute of the generic resource returned by
        the Resource Management client.
    """
    return self.resource_mgmt_client().resources.get_by_id(resource_id, api_version=api_version).properties

def list_resource_groups(self) -> list:
    """Return every resource group visible to the Resource Management client."""
    groups = self.resource_mgmt_client().resource_groups.list()
    return list(groups)

Expand All @@ -106,22 +110,24 @@ def delete_resource(self, resource_id: str) -> None:
self.log_info(f"Deleting of resource group {resource_id}")
self.resource_mgmt_client().resource_groups.begin_delete(resource_id)

def list_images_by_resource_group(self, resource_group):
return self.list_by_resource_group(resource_group,
filters="resourceType eq 'Microsoft.Compute/images'")
def list_images(self):
    """List all Microsoft.Compute/images resources in the configured resource group."""
    image_filter = "resourceType eq 'Microsoft.Compute/images'"
    return self.list_resource(filters=image_filter)

def list_disks(self):
    """List all Microsoft.Compute/disks resources in the configured resource group."""
    disk_filter = "resourceType eq 'Microsoft.Compute/disks'"
    return self.list_resource(filters=disk_filter)

def list_disks_by_resource_group(self, resource_group):
return self.list_by_resource_group(resource_group,
filters="resourceType eq 'Microsoft.Compute/disks'")
def list_galleries(self):
    """List all Microsoft.Compute/galleries resources in the configured resource group."""
    gallery_filter = "resourceType eq 'Microsoft.Compute/galleries'"
    return self.list_resource(filters=gallery_filter)

def list_by_resource_group(self, resource_group, filters=None) -> list:
def list_resource(self, filters=None) -> list:
    """List resources of this provider's resource group, optionally filtered.

    Args:
        filters: an ARM ``$filter`` expression (e.g. by resourceType), or None.

    Returns:
        A list of generic resources, each expanded with its ``changedTime``.
    """
    resources = self.resource_mgmt_client().resources
    found = resources.list_by_resource_group(
        self.__resource_group, filter=filters, expand="changedTime")
    return list(found)

def cleanup_all(self) -> None:
    """Run every cleanup routine: images, gallery image versions, disks, blob containers."""
    self.log_info("Call cleanup_all")
    for task in (self.cleanup_images, self.cleanup_versions,
                 self.cleanup_disks, self.cleanup_blob_containers):
        task()

@staticmethod
Expand Down Expand Up @@ -157,25 +163,41 @@ def cleanup_blob_containers(self) -> None:
self.log_info(f"Deleting blob {blob.name}")
self.container_client(container.name).delete_blob(blob.name, delete_snapshots="include")

def cleanup_images(self) -> None:
    """Delete every image in the resource group whose changed time is outdated.

    In dry-run mode the deletion is only logged, never performed.
    """
    self.log_dbg("Call cleanup_images")
    for image in self.list_images():
        if not self.is_outdated(image.changed_time):
            continue
        if self.dry_run:
            self.log_info(f"Deletion of image {image.name} skipped due to dry run mode")
            continue
        self.log_info(f"Delete image '{image.name}'")
        self.compute_mgmt_client().images.begin_delete(self.__resource_group, image.name)

def cleanup_disks(self) -> None:
    """Delete outdated disks in the resource group.

    Disks still attached to something (``managed_by`` set) are skipped with a
    warning; in dry-run mode the deletion is only logged, never performed.
    """
    self.log_dbg("Call cleanup_disks")
    for disk in self.list_disks():
        if not self.is_outdated(disk.changed_time):
            continue
        if self.compute_mgmt_client().disks.get(self.__resource_group, disk.name).managed_by:
            self.log_warn(f"Disk is in use - skipping {disk.name}")
        elif self.dry_run:
            self.log_info(f"Deletion of disk {disk.name} skipped due to dry run mode")
        else:
            self.log_info(f"Delete disk '{disk.name}'")
            self.compute_mgmt_client().disks.begin_delete(self.__resource_group, disk.name)

def cleanup_versions(self) -> None:
    """Delete outdated gallery image versions.

    Walks every gallery -> image -> version and deletes versions whose
    ``publishingProfile.publishedDate`` (fetched via the generic resource
    API) is older than the configured age limit. In dry-run mode the
    deletion is only logged, never performed.
    """
    self.log_dbg("Call cleanup_versions")
    for gallery in self.list_galleries():
        images = self.compute_mgmt_client().gallery_images.list_by_gallery(
            self.__resource_group, gallery.name)
        for image in images:
            versions = self.compute_mgmt_client().gallery_image_versions.list_by_gallery_image(
                self.__resource_group, gallery.name, image.name)
            for version in versions:
                published = self.get_resource_properties(version.id)['publishingProfile']['publishedDate']
                if not self.is_outdated(parse(published)):
                    continue
                if self.dry_run:
                    self.log_info(
                        f"Deletion of version {gallery.name}/{image.name}/{version.name} skipped due to dry run mode")
                else:
                    self.log_info(f"Delete version '{gallery.name}/{image.name}/{version.name}'")
                    self.compute_mgmt_client().gallery_image_versions.begin_delete(
                        self.__resource_group, gallery.name, image.name, version.name
                    )
94 changes: 67 additions & 27 deletions tests/test_azure.py
Original file line number Diff line number Diff line change
Expand Up @@ -50,20 +50,45 @@ def bs_client_one_pcw_ignore(monkeypatch):
@pytest.fixture
def mock_compute_mgmt_client(monkeypatch):
    """Patch Azure.compute_mgmt_client with a stub that records deletions.

    Deleted image/version names are appended to the module-level
    ``deleted_images`` / ``deleted_versions`` lists; both are reset here so a
    previous test cannot leak state into this one.
    """
    global deleted_images, deleted_versions
    deleted_images = list()
    deleted_versions = list()

    def fake_compute_mgmt_client(self):
        def compute_mgmt_client():
            pass

        def images():
            pass
        images.begin_delete = lambda rg, name: deleted_images.append(name)

        def gallery_images():
            pass
        # gallery/image/version names all propagate from the gallery name
        gallery_images.list_by_gallery = lambda rg, name: [FakeItem(name=name)]

        def gallery_image_versions():
            pass
        gallery_image_versions.list_by_gallery_image = lambda rg, gname, name: [FakeItem(name=name)]
        gallery_image_versions.begin_delete = lambda rg, gname, iname, name: deleted_versions.append(name)

        compute_mgmt_client.images = images
        compute_mgmt_client.gallery_images = gallery_images
        compute_mgmt_client.gallery_image_versions = gallery_image_versions
        return compute_mgmt_client

    monkeypatch.setattr(Azure, 'compute_mgmt_client', fake_compute_mgmt_client)


@pytest.fixture
def mock_resource_mgmt_client(monkeypatch):
    """Patch Azure.resource_mgmt_client so get_by_id returns a fixed publishing date."""
    properties = {'publishingProfile': {'publishedDate': '2023-04-18T19:07:14.1077055+00:00'}}

    def fake_resource_mgmt_client(self):
        def resource_mgmt_client():
            pass

        def resources():
            pass
        resources.get_by_id = lambda *_, **k: FakeIt(properties=properties)

        resource_mgmt_client.resources = resources
        return resource_mgmt_client

    monkeypatch.setattr(Azure, 'resource_mgmt_client', fake_resource_mgmt_client)


class FakeIt:
    """Minimal attribute bag for tests.

    Always exposes ``name`` (default ``"fake"``); any keyword argument
    becomes an attribute and may override the default.
    """

    def __init__(self, **kwargs):
        self.name = "fake"
        for key, value in kwargs.items():
            setattr(self, key, value)


class FakeDisk:

def __init__(self, managed_by=None):
Expand Down Expand Up @@ -160,29 +185,47 @@ def test_cleanup_blob_containers_all_new_one_pcw_ignore(azure_patch, container_c
assert container_client_all_new.deleted_blobs == []


def test_cleanup_images_all_new(azure_patch, monkeypatch, mock_compute_mgmt_client):
    """No image is deleted when every listed image is within the age limit."""
    monkeypatch.setattr(Azure, 'list_resource', lambda *args, **kwargs: [FakeItem(), FakeItem()])
    azure_patch.cleanup_images()

    assert len(deleted_images) == 0


def test_cleanup_images_from_rg_one_old(azure_patch, monkeypatch, mock_compute_mgmt_client):
def test_cleanup_images_one_old(azure_patch, monkeypatch, mock_compute_mgmt_client):
    """Only the outdated image is deleted, and only when dry-run is off."""
    outdated = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours + 1)
    monkeypatch.setattr(Azure, 'list_resource',
                        lambda *args, **kwargs: [FakeItem(outdated, "to_delete"), FakeItem()])

    azure_patch.dry_run = True
    azure_patch.cleanup_images()
    assert len(deleted_images) == 0

    azure_patch.dry_run = False
    azure_patch.cleanup_images()
    assert len(deleted_images) == 1
    assert deleted_images[0] == "to_delete"


def test_cleanup_versions_all_new(azure_patch, monkeypatch, mock_compute_mgmt_client, mock_resource_mgmt_client):
    """cleanup_versions deletes nothing for the default fixture setup."""
    monkeypatch.setattr(Azure, 'list_resource', lambda *args, **kwargs: [FakeIt(), FakeIt()])
    azure_patch.cleanup_versions()

    assert len(deleted_versions) == 0


def test_cleanup_versions_one_old(azure_patch, monkeypatch, mock_compute_mgmt_client, mock_resource_mgmt_client):
    """An outdated gallery image version is deleted only when dry-run is off.

    The mocked gallery_images/gallery_image_versions chain propagates the
    gallery name down to the deleted version, so list_resource must return a
    gallery named "to_delete" for the final assertions to be reachable.
    (The original mock returned two default FakeIt() galleries named "fake",
    which could never yield deleted_versions == ["to_delete"].)
    """
    monkeypatch.setattr(Azure, 'list_resource', lambda *args, **kwargs: [FakeIt(name="to_delete")])
    azure_patch.dry_run = True
    azure_patch.cleanup_versions()
    assert len(deleted_versions) == 0

    azure_patch.dry_run = False
    azure_patch.cleanup_versions()
    assert len(deleted_versions) == 1
    assert deleted_versions[0] == "to_delete"


def test_cleanup_disks_all_new(azure_patch, monkeypatch):

global deleted_images
# to make sure that we not failing due to other test left dirty env.
Expand All @@ -199,13 +242,13 @@ def compute_mgmt_client():

monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(), FakeItem()])
azure_patch.cleanup_disks_from_rg()
monkeypatch.setattr(Azure, 'list_resource', lambda *args, **kwargs: [FakeItem(), FakeItem()])
azure_patch.cleanup_disks()

assert len(deleted_images) == 0


def test_cleanup_disks_from_rg_one_old_no_managed_by(azure_patch, monkeypatch):
def test_cleanup_disks_one_old_no_managed_by(azure_patch, monkeypatch):
global deleted_images
# to make sure that we not failing due to other test left dirty env.
deleted_images = list()
Expand All @@ -222,20 +265,18 @@ def compute_mgmt_client():
monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

old_times = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours+1)
monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"),
FakeItem()
])
monkeypatch.setattr(Azure, 'list_resource', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"), FakeItem()])
azure_patch.dry_run = True
azure_patch.cleanup_disks_from_rg()
azure_patch.cleanup_disks()
assert len(deleted_images) == 0

azure_patch.dry_run = False
azure_patch.cleanup_disks_from_rg()
azure_patch.cleanup_disks()
assert len(deleted_images) == 1
assert deleted_images[0] == "to_delete"


def test_cleanup_disks_from_rg_one_old_with_managed_by(azure_patch, monkeypatch):
def test_cleanup_disks_one_old_with_managed_by(azure_patch, monkeypatch):
global deleted_images
# to make sure that we not failing due to other test left dirty env.
deleted_images = list()
Expand All @@ -252,10 +293,8 @@ def compute_mgmt_client():
monkeypatch.setattr(Azure, 'compute_mgmt_client', mock_compute_mgmt_client)

old_times = datetime.now(timezone.utc) - timedelta(hours=generators.max_age_hours+1)
monkeypatch.setattr(Azure, 'list_by_resource_group', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"),
FakeItem()
])
azure_patch.cleanup_disks_from_rg()
monkeypatch.setattr(Azure, 'list_resource', lambda *args, **kwargs: [FakeItem(old_times, "to_delete"), FakeItem()])
azure_patch.cleanup_disks()

assert len(deleted_images) == 0

Expand All @@ -269,13 +308,14 @@ def count_call(*args, **kwargs):

monkeypatch.setattr(Azure, 'get_storage_key', lambda *args, **kwargs: 'FOOXX')
monkeypatch.setattr(Azure, 'cleanup_blob_containers', count_call)
monkeypatch.setattr(Azure, 'cleanup_disks_from_rg', count_call)
monkeypatch.setattr(Azure, 'cleanup_images_from_rg', count_call)
monkeypatch.setattr(Azure, 'cleanup_disks', count_call)
monkeypatch.setattr(Azure, 'cleanup_images', count_call)
monkeypatch.setattr(Azure, 'cleanup_versions', count_call)
monkeypatch.setattr(Provider, 'read_auth_json', lambda *args, **kwargs: '{}')

az = Azure('fake')
az.cleanup_all()
assert called == 3
assert called == 4


def test_check_credentials(monkeypatch):
Expand Down

0 comments on commit efa86c7

Please sign in to comment.