diff --git a/docs/requirements.txt b/docs/requirements.txt index adcb0ecb..b0d6ad46 100644 --- a/docs/requirements.txt +++ b/docs/requirements.txt @@ -11,4 +11,4 @@ anytree IPython polib azure.mgmt.resource -jsonpath_ng +jsonpath_ng \ No newline at end of file diff --git a/environment.yml b/environment.yml index d32bcb46..01c321d6 100644 --- a/environment.yml +++ b/environment.yml @@ -6,7 +6,7 @@ dependencies: - pytest-cov - pytest-mock - pip: - - semantic-link-sempy>=0.7.5 + - semantic-link-sempy>=0.8.0 - azure-identity==1.7.1 - azure-storage-blob>=12.9.0 - pandas-stubs diff --git a/notebooks/Capacity Migration.ipynb b/notebooks/Capacity Migration.ipynb new file mode 100644 index 00000000..d6653a35 --- /dev/null +++ b/notebooks/Capacity Migration.ipynb @@ -0,0 +1 @@ +{"cells":[{"cell_type":"markdown","id":"5c27dfd1-4fe0-4a97-92e6-ddf78889aa93","metadata":{"nteract":{"transient":{"deleting":false}}},"source":["### Install the latest .whl package\n","\n","Check [here](https://pypi.org/project/semantic-link-labs/) to see the latest version."]},{"cell_type":"code","execution_count":null,"id":"d5cae9db-cef9-48a8-a351-9c5fcc99645c","metadata":{"jupyter":{"outputs_hidden":true,"source_hidden":false},"nteract":{"transient":{"deleting":false}}},"outputs":[],"source":["%pip install semantic-link-labs"]},{"cell_type":"markdown","id":"2856d26d","metadata":{},"source":["### Requirements\n","* Must have an Azure Subscription\n","* Must [register an App](https://ms.portal.azure.com/#blade/Microsoft_AAD_RegisteredApps/ApplicationsListBlade)\n"," * Permissions: Will need the Contributor role at the scope where the resources will be created, which is often the subscription level\n","* Azure Key Vault\n"," * [Set up](https://learn.microsoft.com/azure/key-vault/secrets/quick-create-portal) within the Azure Subscription\n"," * Save secrets for the Tenant ID, Client ID (Application ID), Client Secret\n"," * Permissions: Ensure the user who will be executing the notebooks has “Key Vault Secrets User”\n","* Fabric Permissions\n"," * User should be a tenant admin. This ensures they have the necessary authority to execute and manage the notebooks without encountering permission issues.\n","\n","### Result\n","* F skus are created for each (specified) capacity\n"," * Within the same region as the P SKU\n"," * Equivalent SKU size as the P SKU\n"," * Same admins as listed on the P SKU\n"," * All workspaces are migrated to the corresponding new capacity\n"," * Capacity settings from the P SKU are transferred to the F SKU\n"," * Capacity settings\n"," * Notification settings\n"," * Access settings\n"," * Disaster recovery settings\n"," * Spark settings\n"," * Delegated tenant settings\n","* The names of the newly created F SKU capacities will be an alphanumeric lowercase version of the P SKU capacity name, suffixed with 'fsku'. 
As an example: \"My capacity_3!\" -> \"mycapacity3fsku\"."]},{"cell_type":"markdown","id":"b195eae8","metadata":{},"source":["### Import the library and set the initial parameters"]},{"cell_type":"code","execution_count":null,"id":"1344e286","metadata":{},"outputs":[],"source":["import sempy_labs as labs\n","\n","azure_subscription_id = '' # Enter your Azure subscription ID\n","key_vault_uri = '' # Enter your Azure Key Vault URI\n","key_vault_tenant_id = '' # Enter the name of the Azure Key Vault secret which stores your Tenant ID\n","key_vault_client_id = '' # Enter the name of the Azure Key Vault secret which stores your Client ID (Application ID)\n","key_vault_client_secret = '' # Enter the name of the Azure Key Vault secret which stores your Client Secret\n","resource_group = '' # Enter the name of the resource group (to be used to create the new F skus)"]},{"cell_type":"markdown","id":"5a3fe6e8-b8aa-4447-812b-7931831e07fe","metadata":{"nteract":{"transient":{"deleting":false}}},"source":["### Migrate a single P SKU -> F SKU\n","Set the 'capacities' parameter to the single P SKU."]},{"cell_type":"code","execution_count":null,"id":"3655dd88","metadata":{},"outputs":[],"source":["labs.migrate_capacities(\n"," azure_subscription_id = azure_subscription_id,\n"," key_vault_uri = key_vault_uri,\n"," key_vault_tenant_id = key_vault_tenant_id,\n"," key_vault_client_id = key_vault_client_id,\n"," key_vault_client_secret = key_vault_client_secret,\n"," resource_group = resource_group,\n"," capacities = 'CapacityA',\n"," p_sku_only = True,\n",")"]},{"cell_type":"markdown","id":"175a59b8","metadata":{},"source":["### Migrate a list of P SKUs to F SKUs\n","Set the 'capacities' parameter to a list of P SKUs."]},{"cell_type":"code","execution_count":null,"id":"3a7a80ec","metadata":{},"outputs":[],"source":["labs.migrate_capacities(\n"," azure_subscription_id = azure_subscription_id,\n"," key_vault_uri = key_vault_uri,\n"," key_vault_tenant_id = key_vault_tenant_id,\n"," key_vault_client_id = key_vault_client_id,\n"," key_vault_client_secret = key_vault_client_secret,\n"," resource_group = resource_group,\n"," capacities = ['CapacityA', 'CapacityB', 'CapacityC'],\n"," p_sku_only = True,\n",")"]},{"cell_type":"markdown","id":"30438799","metadata":{},"source":["### Migrate all P SKUs to F SKUs\n","Set the 'capacities' parameter to None."]},{"cell_type":"code","execution_count":null,"id":"315c2dc7","metadata":{},"outputs":[],"source":["labs.migrate_capacities(\n"," azure_subscription_id = azure_subscription_id,\n"," key_vault_uri = key_vault_uri,\n"," key_vault_tenant_id = key_vault_tenant_id,\n"," key_vault_client_id = key_vault_client_id,\n"," key_vault_client_secret = key_vault_client_secret,\n"," resource_group = resource_group,\n"," capacities = None,\n"," p_sku_only = True,\n",")"]},{"cell_type":"markdown","id":"1d8e73b2","metadata":{},"source":["### Migrate a list of P SKUs to F SKUs; associate each capacity with a specific resource group\n","This process ensures that each F SKU is created within the resource group specified in the resource_group_mapping dictionary."]},{"cell_type":"code","execution_count":null,"id":"2854bf8a","metadata":{},"outputs":[],"source":["resource_group_mapping = {\n"," \"CapacityA\": \"ResourceGroupA\",\n"," \"CapacityB\": \"ResourceGroupA\",\n"," \"CapacityC\": \"ResourceGroupB\",\n","}\n","\n","labs.migrate_capacities(\n"," azure_subscription_id = azure_subscription_id,\n"," key_vault_uri = key_vault_uri,\n"," key_vault_tenant_id = key_vault_tenant_id,\n"," 
key_vault_client_id = key_vault_client_id,\n","    key_vault_client_secret = key_vault_client_secret,\n","    resource_group = resource_group_mapping,\n","    capacities = ['CapacityA', 'CapacityB', 'CapacityC'],\n","    p_sku_only = True,\n",")"]},{"cell_type":"markdown","id":"c3f497c8","metadata":{},"source":["### Migrate a single P SKU (already created F SKU)"]},{"cell_type":"code","execution_count":null,"id":"a4f0b5a2","metadata":{},"outputs":[],"source":["source_capacity = '' # Enter the P SKU capacity name\n","target_capacity = '' # Enter the F SKU capacity name (already exists) \n","\n","labs.migrate_workspaces(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","\n","# Optionally migrate settings\n","\"\"\"\n","labs.migrate_capacity_settings(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","labs.migrate_access_settings(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","labs.migrate_delegated_tenant_settings(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","labs.migrate_disaster_recovery_settings(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","labs.migrate_notification_settings(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","labs.migrate_spark_settings(\n","    source_capacity=source_capacity, \n","    target_capacity=target_capacity\n",")\n","\"\"\""]},{"cell_type":"markdown","id":"e0db744b","metadata":{},"source":["### Migrate a list of P SKUs (already created F SKUs)"]},{"cell_type":"code","execution_count":null,"id":"0e04d519","metadata":{},"outputs":[],"source":["capacity_mapping = {\n","    \"capacitya\": \"capacityafsku\", # Format is \"P SKU\": \"F SKU\"\n","    \"capacityb\": \"capacitybfsku\",\n","    \"capacityc\": \"capacitycfsku\",\n","}\n","\n","p_skus = list(capacity_mapping.keys())\n","\n","for p_sku in p_skus:\n","    labs.migrate_workspaces(\n","        source_capacity=p_sku,\n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    # Optionally migrate settings (uses the loop variables, not the single-capacity names from the earlier cell)\n","    \"\"\"\n","    labs.migrate_capacity_settings(\n","        source_capacity=p_sku, \n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    labs.migrate_access_settings(\n","        source_capacity=p_sku, \n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    labs.migrate_delegated_tenant_settings(\n","        source_capacity=p_sku, \n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    labs.migrate_disaster_recovery_settings(\n","        source_capacity=p_sku, \n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    labs.migrate_notification_settings(\n","        source_capacity=p_sku, \n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    labs.migrate_spark_settings(\n","        source_capacity=p_sku, \n","        target_capacity=capacity_mapping.get(p_sku)\n","    )\n","    \"\"\"\n"]}],"metadata":{"kernel_info":{"name":"synapse_pyspark"},"kernelspec":{"display_name":"Synapse PySpark","language":"Python","name":"synapse_pyspark"},"language_info":{"name":"python"},"microsoft":{"language":"python"},"nteract":{"version":"nteract-front-end@1.0.0"},"spark_compute":{"compute_id":"/trident/default"},"synapse_widget":{"state":{},"version":"0.1"},"widgets":{}},"nbformat":4,"nbformat_minor":5} diff --git a/src/sempy_labs/__init__.py b/src/sempy_labs/__init__.py index 8dc777f8..d5a8ddca 100644 --- a/src/sempy_labs/__init__.py +++ b/src/sempy_labs/__init__.py @@ -3,6 +3,36 @@ delete_environment, publish_environment, ) +from sempy_labs._clear_cache import ( +
clear_cache, + backup_semantic_model, + restore_semantic_model, + copy_semantic_model_backup_file, + list_backups, + list_storage_account_files, +) +from sempy_labs._capacity_migration import ( + migrate_spark_settings, + migrate_workspaces, + migrate_capacities, + migrate_notification_settings, + migrate_access_settings, + migrate_delegated_tenant_settings, + migrate_capacity_settings, + migrate_disaster_recovery_settings, +) +from sempy_labs._capacities import ( + create_fabric_capacity, + # get_capacity_resource_governance, + # list_vcores, + resume_fabric_capacity, + suspend_fabric_capacity, + update_fabric_capacity, + delete_fabric_capacity, + check_fabric_capacity_name_availablility, + delete_embedded_capacity, + delete_premium_capacity, +) from sempy_labs._spark import ( get_spark_settings, @@ -30,16 +60,6 @@ ConnectWarehouse, ConnectLakehouse, ) -from sempy_labs._capacities import ( - check_fabric_capacity_name_availablility, - delete_fabric_capacity, - resume_fabric_capacity, - update_fabric_capacity, - create_fabric_capacity, - delete_premium_capacity, - suspend_fabric_capacity, - delete_embedded_capacity, -) from sempy_labs._workspace_identity import ( provision_workspace_identity, deprovision_workspace_identity, @@ -63,14 +83,6 @@ assign_workspace_to_dataflow_storage, list_dataflows, ) -from sempy_labs._clear_cache import ( - clear_cache, - backup_semantic_model, - restore_semantic_model, - copy_semantic_model_backup_file, - list_backups, - list_storage_account_files, -) from sempy_labs._connections import ( list_connections, list_item_connections, @@ -193,6 +205,8 @@ "restore_semantic_model", "list_semantic_model_object_report_usage", "list_report_semantic_model_objects", + "migrate_spark_settings", + "create_azure_storage_account", "delete_custom_pool", "clear_cache", # create_connection_cloud, @@ -309,4 +323,21 @@ "suspend_fabric_capacity", "delete_embedded_capacity", "resolve_dataset_from_report", + "migrate_workspaces", + "migrate_capacities", + "create_fabric_capacity", + "migrate_capacity_settings", + # "get_capacity_resource_governance", + # "list_vcores", + "migrate_disaster_recovery_settings", + "migrate_notification_settings", + "migrate_access_settings", + "migrate_delegated_tenant_settings", + "resume_fabric_capacity", + "suspend_fabric_capacity", + "update_fabric_capacity", + "delete_fabric_capacity", + "check_fabric_capacity_name_availablility", + "delete_embedded_capacity", + "delete_premium_capacity", ] diff --git a/src/sempy_labs/_capacities.py b/src/sempy_labs/_capacities.py index bd0177fc..27548991 100644 --- a/src/sempy_labs/_capacities.py +++ b/src/sempy_labs/_capacities.py @@ -5,6 +5,7 @@ from sempy.fabric.exceptions import FabricHTTPException import requests from sempy_labs._helper_functions import get_azure_token_credentials +import pandas as pd def _add_sll_tag(payload, tags): @@ -155,10 +156,10 @@ def create_fabric_capacity( for i in resource_client.resources.list( "resourceType eq 'Microsoft.PowerBIDedicated/capacities'" ): - if i.name == capacity_name.removesuffix(capacity_suffix): + if i.name == capacity_name.removesuffix(icons.migrate_capacity_suffix): resource_group = i.id.split("/")[4] print( - f"{icons.yellow_dot} Override resource group flag detected for A SKUs - using the existing resource group '{resource_group}' for capacity '{capacity_name}'" + f"{icons.yellow_dot} Override resource group flag detected for A SKUs - using the existing resource group '{resource_group}' for the '{capacity_name}' capacity." 
) else: # Attempt to get the resource group @@ -207,6 +208,41 @@ def create_fabric_capacity( ) +def list_vcores() -> pd.DataFrame: + + df = pd.DataFrame(columns=["Total Purchased Cores", "Available Cores"]) + + client = fabric.PowerBIRestClient() + response = client.get("capacities/vcores") + if response.status_code != 200: + FabricHTTPException(response) + response_json = response.json() + new_data = { + "Total Purchased Cores": response_json.get("totalPurchasedCores"), + "Available Cores": response_json.get("availableCores"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + int_cols = ["Total Purchased Cores", "Available Cores"] + df[int_cols] = df[int_cols].astype(int) + + return df + + +def get_capacity_resource_governance(capacity_name: str): + + dfC = fabric.list_capacities() + dfC_filt = dfC[dfC["Display Name"] == capacity_name] + capacity_id = dfC_filt["Id"].iloc[0].upper() + client = fabric.PowerBIRestClient() + response = client.get(f"capacities/{capacity_id}/resourceGovernance") + + if response.status_code != 200: + FabricHTTPException(response) + + return response.json()["workloadSettings"] + + def suspend_fabric_capacity( capacity_name: str, azure_subscription_id: str, diff --git a/src/sempy_labs/_capacity_migration.py b/src/sempy_labs/_capacity_migration.py new file mode 100644 index 00000000..832b9c13 --- /dev/null +++ b/src/sempy_labs/_capacity_migration.py @@ -0,0 +1,624 @@ +import sempy.fabric as fabric +from typing import Optional, List +from sempy._utils._log import log +import sempy_labs._icons as icons +from sempy.fabric.exceptions import FabricHTTPException +from sempy_labs._list_functions import assign_workspace_to_capacity +from sempy_labs.admin._basic_functions import ( + assign_workspaces_to_capacity, + _list_capacities_meta, +) +from sempy_labs._helper_functions import ( + resolve_capacity_id, + convert_to_alphanumeric_lowercase, +) +from sempy_labs._capacities import create_fabric_capacity + + +@log +def migrate_workspaces( + source_capacity: str, + target_capacity: str, + workspaces: Optional[str | List[str]] = None, +): + """ + This function migrates the workspace(s) from one capacity to another capacity. + Limitation: source & target capacities must be in the same region. + If not all the workspaces succesfully migrated to the target capacity, the migrated workspaces will rollback to be assigned + to the source capacity. + + Parameters + ---------- + source_capacity : str + Name of the source Fabric capacity. + target_capacity : str + Name of the target/destination Fabric capacity. + workspaces : str | List[str], default=None + The name of the workspace(s) specified will be reassigned from the source capacity to the target capacity. + Defaults to None which will reassign all workspaces in the source capacity to the target capacity. + """ + + if isinstance(workspaces, str): + workspaces = [workspaces] + + dfC = _list_capacities_meta() + dfC_filt = dfC[dfC["Capacity Name"] == source_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} Invalid source capacity. The '{source_capacity}' capacity does not exist." + ) + source_capacity_region = dfC_filt["Region"].iloc[0] + source_capacity_id = dfC_filt["Capacity Id"].iloc[0] + dfC_filt = dfC[dfC["Capacity Name"] == target_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} Invalid target capacity. The '{target_capacity}' capacity does not exist." 
+ ) + target_capacity_region = dfC_filt["Region"].iloc[0] + target_capacity_state = dfC_filt["State"].iloc[0] + + if source_capacity_region != target_capacity_region: + raise ValueError( + f"{icons.red_dot} The '{source_capacity}' and '{target_capacity}' capacities are not in the same region." + ) + if target_capacity_state != "Active": + raise ValueError( + f"{icons.red_dot} The '{target_capacity}' target capacity is inactive. The capacity must be active in order for workspaces to be migrated." + ) + + dfW = fabric.list_workspaces(filter=f"capacityId eq '{source_capacity_id.upper()}'") + if workspaces is None: + workspace_count = len(dfW) + else: + workspace_count = len(workspaces) + migrated_workspaces = [] + + for i, r in dfW.iterrows(): + workspace = r["Name"] + + if workspaces is not None and workspace not in workspaces: + continue + + if assign_workspace_to_capacity( + capacity_name=target_capacity, workspace=workspace + ): + migrated_workspaces.append(workspace) + + if len(migrated_workspaces) < workspace_count: + print( + f"{icons.warning} Not all workspaces in the '{source_capacity}' capacity were migrated to the '{target_capacity}' capacity." + ) + print(f"{icons.in_progress} Initiating rollback...") + for i, r in dfW.iterrows(): + workspace = r["Name"] + if workspace in migrated_workspaces: + assign_workspace_to_capacity( + capacity_name=source_capacity, workspace=workspace + ) + print( + f"{icons.green_dot} Rollback of the workspaces to the '{source_capacity}' capacity is complete." + ) + else: + print( + f"{icons.green_dot} All workspaces were migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity successfully." + ) + + +@log +def migrate_capacities( + azure_subscription_id: str, + key_vault_uri: str, + key_vault_tenant_id: str, + key_vault_client_id: str, + key_vault_client_secret: str, + resource_group: str | dict, + capacities: Optional[str | List[str]] = None, + use_existing_rg_for_A_sku: Optional[bool] = True, + p_sku_only: Optional[bool] = True, +): + """ + This function creates new Fabric capacities for given A or P sku capacities and reassigns their workspaces to the newly created capacity. + + Parameters + ---------- + azure_subscription_id : str + The Azure subscription ID. + key_vault_uri : str + The Azure key vault URI. Example: "https://.vault.azure.net/" + key_vault_tenant_id : str + The name of the Azure key vault secret storing the Tenant ID. + key_vault_client_id : str + The name of the Azure key vault secret storing the Client ID. + key_vault_client_secret : str + The name of the Azure key vault secret storing the Client Secret. + resource_group : str | dict + The name of the Azure resource group. + For A skus, this parameter will be ignored and the resource group used for the F sku will be the same as the A sku's resource group. + For P skus, if this parameter is a string, it will use that resource group for all of the newly created F skus. + If this parameter is a dictionary, it will use that mapping (capacity name -> resource group) for creating capacities with the mapped resource groups. + capacities : str | List[str], default=None + The capacity(ies) to migrate from A/P -> F sku. + Defaults to None which migrates all accessible A/P sku capacities to F skus. + use_existing_rg_for_A_sku : bool, default=True + If True, the F sku inherits the resource group from the A sku (for A sku migrations). + p_sku_only : bool, default=True + If set to True, only migrates P skus. If set to False, migrates both P and A skus.
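+
+    Examples
+    --------
+    A minimal usage sketch mirroring the accompanying notebook; every value below is a placeholder, not a real resource:
+
+    >>> import sempy_labs as labs
+    >>> labs.migrate_capacities(
+    ...     azure_subscription_id="<azure-subscription-id>",
+    ...     key_vault_uri="https://<key-vault-name>.vault.azure.net/",
+    ...     key_vault_tenant_id="<secret-name-for-tenant-id>",
+    ...     key_vault_client_id="<secret-name-for-client-id>",
+    ...     key_vault_client_secret="<secret-name-for-client-secret>",
+    ...     resource_group="MyResourceGroup",
+    ...     capacities=["CapacityA", "CapacityB"],
+    ...     p_sku_only=True,
+    ... )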
+ """ + + from sempy_labs._list_functions import list_capacities + + if isinstance(capacities, str): + capacities = [capacities] + + p_sku_list = list(icons.sku_mapping.keys()) + + dfC = list_capacities() + + if capacities is None: + dfC_filt = dfC.copy() + else: + dfC_filt = dfC[dfC["Display Name"].isin(capacities)] + + if p_sku_only: + dfC_filt = dfC_filt[dfC_filt["Sku"].str.startswith("P")] + else: + dfC_filt = dfC_filt[ + (dfC_filt["Sku"].str.startswith(("P", "A"))) + & (~dfC_filt["Sku"].str.startswith("PP")) + ] + + dfC_filt = ( + dfC_filt.copy() + ) # Something strange is happening here. Without this a key error on Display Name occurs + + if len(dfC_filt) == 0: + print(f"{icons.info} There are no valid capacities to migrate.") + return + + for _, r in dfC_filt.iterrows(): + cap_name = r["Display Name"] + region = r["Region"] + sku_size = r["Sku"] + admins = r["Admins"] + tgt_capacity = f"{convert_to_alphanumeric_lowercase(cap_name)}{icons.migrate_capacity_suffix}" + + # Check if target capacity exists + dfC_tgt = dfC[dfC["Display Name"] == tgt_capacity] + + if sku_size[:1] == "A" and use_existing_rg_for_A_sku: + rg = None + else: + if isinstance(resource_group, str): + rg = resource_group + elif isinstance(resource_group, dict): + rg = resource_group.get(cap_name) + else: + raise ValueError(f"{icons.red_dot} Invalid 'resource_group' parameter.") + + if sku_size in p_sku_list: + # Only create the capacity if it does not already exist + if len(dfC_tgt) > 0: + print( + f"{icons.info} Skipping creating a new capacity for '{cap_name}' as the '{tgt_capacity}' capacity already exists." + ) + else: + create_fabric_capacity( + capacity_name=tgt_capacity, + azure_subscription_id=azure_subscription_id, + key_vault_uri=key_vault_uri, + key_vault_tenant_id=key_vault_tenant_id, + key_vault_client_id=key_vault_client_id, + key_vault_client_secret=key_vault_client_secret, + resource_group=rg, + region=region, + sku=icons.sku_mapping.get(sku_size), + admin_members=admins, + ) + # Migrate workspaces to new capacity + assign_workspaces_to_capacity( + source_capacity=cap_name, target_capacity=tgt_capacity, workspace=None + ) + + # Migrate settings to new capacity + migrate_capacity_settings( + source_capacity=cap_name, target_capacity=tgt_capacity + ) + migrate_access_settings( + source_capacity=cap_name, target_capacity=tgt_capacity + ) + migrate_notification_settings( + source_capacity=cap_name, target_capacity=tgt_capacity + ) + migrate_delegated_tenant_settings( + source_capacity=cap_name, target_capacity=tgt_capacity + ) + migrate_disaster_recovery_settings( + source_capacity=cap_name, target_capacity=tgt_capacity + ) + migrate_spark_settings( + source_capacity=cap_name, target_capacity=tgt_capacity + ) + + +@log +def migrate_capacity_settings(source_capacity: str, target_capacity: str): + """ + This function migrates a capacity's settings to another capacity. + + Parameters + ---------- + source_capacity : str + Name of the source capacity. + target_capacity : str + Name of the target capacity. + + Returns + ------- + """ + + dfC = fabric.list_capacities() + dfC_filt = dfC[dfC["Display Name"] == source_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{source_capacity}' capacity does not exist." + ) + source_capacity_id = dfC_filt["Id"].iloc[0].upper() + dfC_filt = dfC[dfC["Display Name"] == target_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{target_capacity}' capacity does not exist." 
+ ) + target_capacity_id = dfC_filt["Id"].iloc[0].upper() + + workloads_params = "capacityCustomParameters?workloadIds=ADM&workloadIds=CDSA&workloadIds=DMS&workloadIds=RsRdlEngine&workloadIds=ScreenshotEngine&workloadIds=AS&workloadIds=QES&workloadIds=DMR&workloadIds=ESGLake&workloadIds=NLS&workloadIds=lake&workloadIds=TIPS&workloadIds=Kusto&workloadIds=Lakehouse&workloadIds=SparkCore&workloadIds=DI&workloadIds=Notebook&workloadIds=ML&workloadIds=ES&workloadIds=Reflex&workloadIds=Must&workloadIds=dmh&workloadIds=PowerBI&workloadIds=HLS" + + client = fabric.PowerBIRestClient() + response_get_source = client.get( + f"capacities/{source_capacity_id}/{workloads_params}" + ) + if response_get_source.status_code != 200: + raise FabricHTTPException(response_get_source) + + response_source_json = response_get_source.json().get( + "capacityCustomParameters", {} + ) + + # Create payload for put request + def remove_empty_workloads(data): + keys_to_remove = [ + key for key, value in data.items() if not value["workloadCustomParameters"] + ] + for key in keys_to_remove: + del data[key] + + remove_empty_workloads(response_source_json) + + settings_json = {} + settings_json["capacityCustomParameters"] = {} + + for workload in response_source_json: + if workload not in ["AI"]: + settings_json["capacityCustomParameters"][workload] = {} + settings_json["capacityCustomParameters"][workload][ + "workloadCustomParameters" + ] = {} + + for workload_part in response_source_json[workload].values(): + for workload_item in workload_part: + setting_name = workload_item["name"] + setting_value = workload_item["value"] + # Copy the value through unchanged; None, bool, str and numeric values are all passed as-is + settings_json["capacityCustomParameters"][workload][ + "workloadCustomParameters" + ][setting_name] = setting_value + + response_put = client.put( + f"capacities/{target_capacity_id}/{workloads_params}", + json=settings_json, + ) + if response_put.status_code != 204: + raise FabricHTTPException(response_put) + + print( + f"{icons.green_dot} The capacity settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity." + ) + + +@log +def migrate_disaster_recovery_settings(source_capacity: str, target_capacity: str): + """ + This function migrates a capacity's disaster recovery settings to another capacity. + + Parameters + ---------- + source_capacity : str + Name of the source capacity. + target_capacity : str + Name of the target capacity. + """ + + dfC = fabric.list_capacities() + dfC_filt = dfC[dfC["Display Name"] == source_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{source_capacity}' capacity does not exist." + ) + source_capacity_id = dfC_filt["Id"].iloc[0].upper() + dfC_filt = dfC[dfC["Display Name"] == target_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{target_capacity}' capacity does not exist."
+ ) + target_capacity_id = dfC_filt["Id"].iloc[0].upper() + + client = fabric.PowerBIRestClient() + response_get_source = client.get(f"capacities/{source_capacity_id}/config") + if response_get_source.status_code != 200: + raise FabricHTTPException(response_get_source) + + request_body = {} + value = response_get_source.json()["bcdr"]["config"] + request_body["config"] = value + + response_put = client.put( + f"capacities/{target_capacity_id}/fabricbcdr", json=request_body + ) + + if response_put.status_code != 202: + raise FabricHTTPException(response_put) + print( + f"{icons.green_dot} The disaster recovery settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity." + ) + + +@log +def migrate_access_settings(source_capacity: str, target_capacity: str): + """ + This function migrates the access settings from a source capacity to a target capacity. + + Parameters + ---------- + source_capacity : str + Name of the source capacity. + target_capacity : str + Name of the target capacity. + + Returns + ------- + """ + + dfC = fabric.list_capacities() + dfC_filt = dfC[dfC["Display Name"] == source_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{source_capacity}' capacity does not exist." + ) + source_capacity_id = dfC_filt["Id"].iloc[0].upper() + dfC_filt = dfC[dfC["Display Name"] == target_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{target_capacity}' capacity does not exist." + ) + target_capacity_id = dfC_filt["Id"].iloc[0].upper() + + client = fabric.PowerBIRestClient() + response_get_source = client.get(f"capacities/{source_capacity_id}") + if response_get_source.status_code != 200: + raise FabricHTTPException(response_get_source) + + access_settings = response_get_source.json().get("access", {}) + + response_put = client.put( + f"capacities/{target_capacity_id}/access", + json=access_settings, + ) + if response_put.status_code != 204: + raise FabricHTTPException(response_put) + + print( + f"{icons.green_dot} The access settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity." + ) + + +@log +def migrate_notification_settings(source_capacity: str, target_capacity: str): + """ + This function migrates the notification settings from a source capacity to a target capacity. + + Parameters + ---------- + source_capacity : str + Name of the source capacity. + target_capacity : str + Name of the target capacity. + + Returns + ------- + """ + + dfC = fabric.list_capacities() + dfC_filt = dfC[dfC["Display Name"] == source_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{source_capacity}' capacity does not exist." + ) + source_capacity_id = dfC_filt["Id"].iloc[0].upper() + dfC_filt = dfC[dfC["Display Name"] == target_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{target_capacity}' capacity does not exist." 
+ ) + target_capacity_id = dfC_filt["Id"].iloc[0].upper() + + client = fabric.PowerBIRestClient() + response_get_source = client.get(f"capacities/{source_capacity_id}") + if response_get_source.status_code != 200: + raise FabricHTTPException(response_get_source) + + notification_settings = response_get_source.json().get( + "capacityNotificationSettings", {} + ) + + response_put = client.put( + f"capacities/{target_capacity_id}/notificationSettings", + json=notification_settings, + ) + if response_put.status_code != 204: + raise FabricHTTPException(response_put) + + print( + f"{icons.green_dot} The notification settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity." + ) + + +@log +def migrate_delegated_tenant_settings(source_capacity: str, target_capacity: str): + """ + This function migrates the delegated tenant settings from a source capacity to a target capacity. + + Parameters + ---------- + source_capacity : str + Name of the source capacity. + target_capacity : str + Name of the target capacity. + + Returns + ------- + """ + + dfC = fabric.list_capacities() + + dfC_filt = dfC[dfC["Display Name"] == source_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{source_capacity}' capacity does not exist." + ) + source_capacity_id = dfC_filt["Id"].iloc[0].upper() + + dfC_filt = dfC[dfC["Display Name"] == target_capacity] + if len(dfC_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{target_capacity}' capacity does not exist." + ) + target_capacity_id = dfC_filt["Id"].iloc[0].upper() + + client = fabric.FabricRestClient() + response_get = client.get("v1/admin/capacities/delegatedTenantSettingOverrides") + + if response_get.status_code != 200: + raise FabricHTTPException(response_get) + + response_json = response_get.json().get("Overrides", []) + + for o in response_json: + if o.get("id").upper() == source_capacity_id: + for setting in o.get("tenantSettings", []): + setting_name = setting.get("settingName") + feature_switch = { + "switchId": -1, + "switchName": setting_name, + "isEnabled": setting.get("enabled", False), + "isGranular": setting.get("canSpecifySecurityGroups", False), + "allowedSecurityGroups": [ + { + "id": group.get("graphId"), + "name": group.get("name"), + "isEmailEnabled": False, + } + for group in setting.get("enabledSecurityGroups", []) + ], + "deniedSecurityGroups": [ + { + "id": group.get("graphId"), + "name": group.get("name"), + "isEmailEnabled": False, + } + for group in setting.get("excludedSecurityGroups", []) + ], + } + + payload = {"featureSwitches": [feature_switch], "properties": []} + + client = fabric.PowerBIRestClient() + response_put = client.put( + f"metadata/tenantsettings/selfserve?capacityObjectId={target_capacity_id}", + json=payload, + ) + if response_put.status_code != 200: + raise FabricHTTPException(response_put) + + print( + f"{icons.green_dot} The delegated tenant settings for the '{setting_name}' feature switch of the '{source_capacity}' capacity have been migrated to the '{target_capacity}' capacity." + ) + + +@log +def migrate_spark_settings(source_capacity: str, target_capacity: str): + """ + This function migrates a capacity's spark settings to another capacity. + + Requirement: The target capacity must be able to accommodate the spark pools being migrated from the source capacity. + + Parameters + ---------- + source_capacity : str + Name of the source capacity. + target_capacity : str + Name of the target capacity.
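+
+    Examples
+    --------
+    A minimal sketch, mirroring the optional settings-migration step in the notebook; the capacity names are placeholders:
+
+    >>> import sempy_labs as labs
+    >>> labs.migrate_spark_settings(
+    ...     source_capacity="CapacityA",
+    ...     target_capacity="capacityafsku",
+    ... )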
+ """ + + source_capacity_id = resolve_capacity_id(capacity_name=source_capacity) + target_capacity_id = resolve_capacity_id(capacity_name=target_capacity) + client = fabric.PowerBIRestClient() + + # Get source capacity server dns + response = client.get(f"metadata/capacityInformation/{source_capacity_id}") + if response.status_code != 200: + raise FabricHTTPException(response) + + source_server_dns = response.json().get("capacityDns") + source_url = f"{source_server_dns}/webapi/capacities" + + # Get target capacity server dns + response = client.get(f"metadata/capacityInformation/{target_capacity_id}") + if response.status_code != 200: + raise FabricHTTPException(response) + + target_server_dns = response.json().get("capacityDns") + target_url = f"{target_server_dns}/webapi/capacities" + + # Construct get and put URLs + end_url = "workloads/SparkCore/SparkCoreService/automatic/v1/sparksettings" + get_url = f"{source_url}/{source_capacity_id}/{end_url}" + put_url = f"{target_url}/{target_capacity_id}/{end_url}/content" + + # Get source capacity spark settings + response = client.get(get_url) + if response.status_code != 200: + raise FabricHTTPException(response) + + payload = response.json().get("content") + + # Update target capacity spark settings + response_put = client.put(put_url, json=payload) + + if response_put.status_code != 200: + raise FabricHTTPException(response_put) + print( + f"{icons.green_dot} The spark settings have been migrated from the '{source_capacity}' capacity to the '{target_capacity}' capacity." + ) diff --git a/src/sempy_labs/_helper_functions.py b/src/sempy_labs/_helper_functions.py index 0410afcf..a80dda1f 100644 --- a/src/sempy_labs/_helper_functions.py +++ b/src/sempy_labs/_helper_functions.py @@ -992,34 +992,6 @@ def get_language_codes(languages: str | List[str]): return languages -def resolve_environment_id(environment: str, workspace: Optional[str] = None) -> UUID: - """ - Obtains the environment Id for a given environment. - - Parameters - ---------- - environment: str - Name of the environment. - workspace : str, default=None - The Fabric workspace name. - Defaults to None which resolves to the workspace of the attached lakehouse - or if no lakehouse attached, resolves to the workspace of the notebook. - """ - - from sempy_labs._environments import list_environments - - (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) - - dfE = list_environments(workspace=workspace) - dfE_filt = dfE[dfE["Environment Name"] == environment] - if len(dfE_filt) == 0: - raise ValueError( - f"{icons.red_dot} The '{environment}' environment does not exist within the '{workspace}' workspace." - ) - - return dfE_filt["Environment Id"].iloc[0] - - def get_azure_token_credentials( key_vault_uri: str, key_vault_tenant_id: str, @@ -1048,3 +1020,34 @@ def get_azure_token_credentials( } return token, credential, headers + + +def convert_to_alphanumeric_lowercase(input_string): + + cleaned_string = re.sub(r"[^a-zA-Z0-9]", "", input_string) + cleaned_string = cleaned_string.lower() + + return cleaned_string + + +def resolve_environment_id(environment: str, workspace: Optional[str] = None) -> UUID: + """ + Obtains the environment Id for a given environment. + + Parameters + ---------- + environment: str + Name of the environment. 
+ """ + from sempy_labs._environments import list_environments + + (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) + + dfE = list_environments(workspace=workspace) + dfE_filt = dfE[dfE["Environment Name"] == environment] + if len(dfE_filt) == 0: + raise ValueError( + f"{icons.red_dot} The '{environment}' environment does not exist within the '{workspace}' workspace." + ) + + return dfE_filt["Environment Id"].iloc[0] diff --git a/src/sempy_labs/_icons.py b/src/sempy_labs/_icons.py index be5b4ec3..f7e7ab18 100644 --- a/src/sempy_labs/_icons.py +++ b/src/sempy_labs/_icons.py @@ -74,3 +74,22 @@ } workspace_roles = ["Admin", "Member", "Viewer", "Contributor"] principal_types = ["App", "Group", "None", "User"] +azure_api_version = "2023-11-01" +migrate_capacity_suffix = "fsku" +sku_mapping = { + "A1": "F8", + "EM1": "F8", + "A2": "F16", + "EM2": "F16", + "A3": "F32", + "EM3": "F32", + "A4": "F64", + "P1": "F64", + "A5": "F128", + "P2": "F128", + "A6": "F256", + "P3": "F256", + "A7": "F512", + "P4": "F512", + "P5": "F1024", +} diff --git a/src/sempy_labs/_list_functions.py b/src/sempy_labs/_list_functions.py index 4657b874..59c2e294 100644 --- a/src/sempy_labs/_list_functions.py +++ b/src/sempy_labs/_list_functions.py @@ -8,6 +8,7 @@ lro, resolve_item_type, format_dax_object_name, + pagination, ) import pandas as pd from typing import Optional diff --git a/src/sempy_labs/admin/__init__.py b/src/sempy_labs/admin/__init__.py new file mode 100644 index 00000000..d29cf2cf --- /dev/null +++ b/src/sempy_labs/admin/__init__.py @@ -0,0 +1,53 @@ +from sempy_labs.admin._basic_functions import ( + assign_workspaces_to_capacity, + list_capacities, + list_tenant_settings, + list_capacities_delegated_tenant_settings, + unassign_workspaces_from_capacity, + list_external_data_shares, + revoke_external_data_share, + list_workspaces, + list_datasets, + list_item_access_details, + list_access_entities, + list_workspace_access_details, + list_items, +) +from sempy_labs.admin._domains import ( + list_domains, + list_domain_workspaces, + assign_domain_workspaces, + assign_domain_workspaces_by_capacities, + create_domain, + update_domain, + delete_domain, + resolve_domain_id, + unassign_domain_workspaces, + unassign_all_domain_workspaces, +) + +__all__ = [ + "list_items", + "list_workspace_access_details", + "list_access_entities", + "list_item_access_details", + "list_datasets", + "list_workspaces", + "assign_workspaces_to_capacity", + "list_capacities", + "list_tenant_settings", + "list_domains", + "list_domain_workspaces", + "assign_domain_workspaces", + "assign_domain_workspaces_by_capacities", + "create_domain", + "update_domain", + "delete_domain", + "resolve_domain_id", + "unassign_domain_workspaces", + "unassign_all_domain_workspaces", + "list_capacities_delegated_tenant_settings", + "unassign_workspaces_from_capacity", + "list_external_data_shares", + "revoke_external_data_share", +] diff --git a/src/sempy_labs/admin/_basic_functions.py b/src/sempy_labs/admin/_basic_functions.py new file mode 100644 index 00000000..e7e2f99c --- /dev/null +++ b/src/sempy_labs/admin/_basic_functions.py @@ -0,0 +1,802 @@ +import sempy.fabric as fabric +from typing import Optional, List, Union +from uuid import UUID +import sempy_labs._icons as icons +from sempy.fabric.exceptions import FabricHTTPException +from sempy_labs._helper_functions import resolve_workspace_name_and_id, pagination +import datetime +import numpy as np +import pandas as pd +import time + + +def list_workspaces( + top: Optional[int] = 
5000, skip: Optional[int] = None +) -> pd.DataFrame: + """ + Lists workspaces for the organization. This function is the admin version of list_workspaces. + + Parameters + ---------- + top : int, default=5000 + Returns only the first n results. This parameter is mandatory and must be in the range of 1-5000. + skip : int, default=None + Skips the first n results. Use with top to fetch results beyond the first 5000. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing a list of workspaces for the organization. + """ + + df = pd.DataFrame( + columns=[ + "Id", + "Is Read Only", + "Is On Dedicated Capacity", + "Type", + "Name", + "Capacity Id", + "Default Dataset Storage Format", + "Pipeline Id", + "Has Workspace Level Settings", + ] + ) + + url = f"/v1.0/myorg/admin/groups?$top={top}" + if skip is not None: + url = f"{url}&$skip={skip}" + + client = fabric.PowerBIRestClient() + response = client.get(url) + + if response.status_code != 200: + raise FabricHTTPException(response) + + for v in response.json().get("value", []): + capacity_id = v.get("capacityId") + if capacity_id: + capacity_id = capacity_id.lower() + new_data = { + "Id": v.get("id"), + "Is Read Only": v.get("isReadOnly"), + "Is On Dedicated Capacity": v.get("isOnDedicatedCapacity"), + "Capacity Id": capacity_id, + "Default Dataset Storage Format": v.get("defaultDatasetStorageFormat"), + "Type": v.get("type"), + "Name": v.get("name"), + "State": v.get("state"), + "Pipeline Id": v.get("pipelineId"), + "Has Workspace Level Settings": v.get("hasWorkspaceLevelSettings"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + bool_cols = [ + "Is Read Only", + "Is On Dedicated Capacity", + "Has Workspace Level Settings", + ] + df[bool_cols] = df[bool_cols].astype(bool) + + return df + + +def assign_workspaces_to_capacity( + source_capacity: str, + target_capacity: str, + workspace: Optional[str | List[str]] = None, +): + """ + Assigns a workspace to a capacity. This function is the admin version. + + Parameters + ---------- + source_capacity : str + The name of the source capacity. + target_capacity : str + The name of the target capacity. + workspace : str | List[str], default=None + The name of the workspace(s). + Defaults to None which resolves to migrating all workspaces within the source capacity to the target capacity. 
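+
+    Examples
+    --------
+    A minimal sketch; the capacity and workspace names are placeholders:
+
+    >>> import sempy_labs.admin as admin
+    >>> admin.assign_workspaces_to_capacity(
+    ...     source_capacity="CapacityA",
+    ...     target_capacity="CapacityB",
+    ...     workspace=["Sales", "Finance"],
+    ... )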
+ """ + + if isinstance(workspace, str): + workspace = [workspace] + + dfC = fabric.list_capacities() + dfC_filt = dfC[dfC["Display Name"] == source_capacity] + source_capacity_id = dfC_filt["Id"].iloc[0] + + dfC_filt = dfC[dfC["Display Name"] == target_capacity] + target_capacity_id = dfC_filt["Id"].iloc[0] + + if workspace is None: + workspaces = fabric.list_workspaces( + filter=f"capacityId eq '{source_capacity_id.upper()}'" + )["Id"].values + else: + dfW = fabric.list_workspaces() + workspaces = dfW[dfW["Name"].isin(workspace)]["Id"].values + + workspaces = np.array(workspaces) + batch_size = 999 + for i in range(0, len(workspaces), batch_size): + batch = workspaces[i : i + batch_size].tolist() + batch_length = len(batch) + start_time = datetime.datetime.now() + request_body = { + "capacityMigrationAssignments": [ + { + "targetCapacityObjectId": target_capacity_id.upper(), + "workspacesToAssign": batch, + } + ] + } + + client = fabric.PowerBIRestClient() + response = client.post( + "/v1.0/myorg/admin/capacities/AssignWorkspaces", + json=request_body, + ) + + if response.status_code != 200: + raise FabricHTTPException(response) + end_time = datetime.datetime.now() + print( + f"Total time for assigning {str(batch_length)} workspaces is {str((end_time - start_time).total_seconds())}" + ) + print( + f"{icons.green_dot} The workspaces have been assigned to the '{target_capacity}' capacity." + ) + + +def list_capacities() -> pd.DataFrame: + """ + Shows the a list of capacities and their properties. This function is the admin version. + + Parameters + ---------- + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the capacities and their properties + """ + + df = pd.DataFrame( + columns=["Capacity Id", "Capacity Name", "Sku", "Region", "State", "Admins"] + ) + + client = fabric.PowerBIRestClient() + response = client.get("/v1.0/myorg/admin/capacities") + if response.status_code != 200: + raise FabricHTTPException(response) + + responses = pagination(client, response) + + for r in responses: + for i in r.get("value", []): + new_data = { + "Capacity Id": i.get("id").lower(), + "Capacity Name": i.get("displayName"), + "Sku": i.get("sku"), + "Region": i.get("region"), + "State": i.get("state"), + "Admins": [i.get("admins", [])], + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df + + +def list_tenant_settings() -> pd.DataFrame: + """ + Lists all tenant settings. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing the tenant settings. 
+ """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/tenants/list-tenant-settings?tabs=HTTP + + client = fabric.FabricRestClient() + response = client.get("/v1/admin/tenantsettings") + + if response.status_code != 200: + raise FabricHTTPException(response) + + df = pd.DataFrame( + columns=[ + "Setting Name", + "Title", + "Enabled", + "Can Specify Security Groups", + "Tenant Setting Group", + "Enabled Security Groups", + ] + ) + + for i in response.json().get("tenantSettings", []): + new_data = { + "Setting Name": i.get("settingName"), + "Title": i.get("title"), + "Enabled": i.get("enabled"), + "Can Specify Security Groups": i.get("canSpecifySecurityGroups"), + "Tenant Setting Group": i.get("tenantSettingGroup"), + "Enabled Security Groups": [i.get("enabledSecurityGroups", [])], + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + bool_cols = ["Enabled", "Can Specify Security Groups"] + df[bool_cols] = df[bool_cols].astype(bool) + + return df + + +def _list_capacities_meta() -> pd.DataFrame: + + df = pd.DataFrame( + columns=["Capacity Id", "Capacity Name", "Sku", "Region", "State", "Admins"] + ) + + client = fabric.PowerBIRestClient() + try: + response = client.get("/v1.0/myorg/admin/capacities") + except Exception as e: + if e.status_code not in [200, 401]: + raise FabricHTTPException(response) + elif e.status_code == 401: + response = client.get("/v1.0/myorg/capacities") + + for i in response.json().get("value", []): + new_data = { + "Capacity Id": i.get("id").lower(), + "Capacity Name": i.get("displayName"), + "Sku": i.get("sku"), + "Region": i.get("region"), + "State": i.get("state"), + "Admins": [i.get("admins", [])], + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df + + +def unassign_workspaces_from_capacity(workspaces: str | List[str]): + """ + Unassigns workspace(s) from their capacity. This function is the admin version of list_workspaces. + + Parameters + ---------- + workspaces : str | List[str] + The Fabric workspace name(s). + """ + + # https://learn.microsoft.com/en-us/rest/api/power-bi/admin/capacities-unassign-workspaces-from-capacity + + if isinstance(workspaces, str): + workspaces = [workspaces] + + payload = {"workspacesToUnassign": workspaces} + + client = fabric.PowerBIRestClient() + response = client.post( + "/v1.0/myorg/admin/capacities/UnassignWorkspaces", + json=payload, + ) + + if response.status_code != 200: + raise FabricHTTPException(response) + + print(f"{icons.green_dot} The workspaces have been unassigned.") + + +def list_external_data_shares(): + """ + Lists external data shares in the tenant. This function is for admins. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing a list of external data shares in the tenant. 
+ """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/external-data-shares/list-external-data-shares?tabs=HTTP + + df = pd.DataFrame( + columns=[ + "External Data Share Id", + "Paths", + "Creater Principal Id", + "Creater Principal Name", + "Creater Principal Type", + "Creater Principal UPN", + "Recipient UPN", + "Status", + "Expiration Time UTC", + "Workspace Id", + "Item Id", + "Invitation URL", + ] + ) + + client = fabric.FabricRestClient() + response = client.get("/v1/admin/items/externalDataShares") + + if response.status_code != 200: + raise FabricHTTPException(response) + + for i in response.json().get("value", []): + cp = i.get("creatorPrincipal", {}) + new_data = { + "External Data Share Id": i.get("id"), + "Paths": [i.get("paths", [])], + "Creater Principal Id": cp.get("id"), + "Creater Principal Name": cp.get("displayName"), + "Creater Principal Type": cp.get("type"), + "Creater Principal UPN": cp.get("userDetails", {}).get("userPrincipalName"), + "Recipient UPN": i.get("recipient", {}).get("userPrincipalName"), + "Status": i.get("status"), + "Expiration Time UTC": i.get("expirationTimeUtc"), + "Workspace Id": i.get("workspaceId"), + "Item Id": i.get("itemId"), + "Invitation URL": i.get("invitationUrl"), + } + + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + date_time_columns = ["Expiration Time UTC"] + df[date_time_columns] = pd.to_datetime(df[date_time_columns]) + + return df + + +def revoke_external_data_share( + external_data_share_id: UUID, item_id: UUID, workspace: str +): + """ + Revokes the specified external data share. Note: This action cannot be undone. + + Parameters + ---------- + external_data_share_id : UUID + The external data share ID. + item_id : int, default=None + The Item ID + workspace : str + The Fabric workspace name. + """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/external-data-shares/revoke-external-data-share?tabs=HTTP + + (workspace, workspace_id) = resolve_workspace_name_and_id(workspace) + + client = fabric.FabricRestClient() + response = client.post( + f"/v1/admin/workspaces/{workspace_id}/items/{item_id}/externalDataShares/{external_data_share_id}/revoke" + ) + + if response.status_code != 200: + raise FabricHTTPException(response) + + print( + f"{icons.green_dot} The '{external_data_share_id}' external data share for the '{item_id}' item within the '{workspace}' workspace has been revoked." + ) + + +def list_capacities_delegated_tenant_settings(return_dataframe: Optional[bool] = True) -> Optional[pd.Dataframe | dict]: + """ + Returns list of tenant setting overrides that override at the capacities. + + Parameters + ---------- + return_dataframe : bool, default=True + If True, returns a dataframe. If False, returns a dictionary + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing a list of tenant setting overrides that override at the capacities. 
+ """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/tenants/list-capacities-tenant-settings-overrides?tabs=HTTP + + df = pd.DataFrame( + columns=[ + "Capacity Id", + "Setting Name", + "Setting Title", + "Setting Enabled", + "Can Specify Security Groups", + "Enabled Security Groups", + "Tenant Setting Group", + "Tenant Setting Properties", + "Delegate to Workspace", + "Delegated From", + ] + ) + + client = fabric.FabricRestClient() + response = client.get("/v1/admin/capacities/delegatedTenantSettingOverrides") + + if response.status_code != 200: + raise FabricHTTPException(response) + + responses = pagination(client, response) + + if return_dataframe: + for r in responses: + for i in r.get("Overrides", []): + tenant_settings = i.get("tenantSettings", []) + for setting in tenant_settings: + new_data = { + "Capacity Id": i.get("id"), + "Setting Name": setting.get("settingName"), + "Setting Title": setting.get("title"), + "Setting Enabled": setting.get("enabled"), + "Can Specify Security Groups": setting.get( + "canSpecifySecurityGroups" + ), + "Enabled Security Groups": [ + setting.get("enabledSecurityGroups", []) + ], + "Tenant Setting Group": setting.get("tenantSettingGroup"), + "Tenant Setting Properties": [setting.get("properties", [])], + "Delegate to Workspace": setting.get("delegateToWorkspace"), + "Delegated From": setting.get("delegatedFrom"), + } + + df = pd.concat( + [df, pd.DataFrame(new_data, index=[0])], ignore_index=True + ) + + bool_cols = [ + "Enabled Security Groups", + "Can Specify Security Groups", + "Delegate to Workspace", + ] + df[bool_cols] = df[bool_cols].astype(bool) + + return df + else: + combined_response = { + "overrides": [], + "continuationUri": "", + "continuationToken": "", + } + for r in responses: + combined_response["overrides"].extend(r["overrides"]) + combined_response["continuationUri"] = r["continuationUri"] + combined_response["continuationToken"] = r["continuationToken"] + + return combined_response + + +def scan_workspaces( + data_source_details: Optional[bool] = False, + dataset_schema: Optional[bool] = False, + dataset_expressions: Optional[bool] = False, + lineage: Optional[bool] = False, + artifact_users: Optional[bool] = False, + workspace: Optional[str | List[str]] = None, +) -> dict: + + workspace = fabric.resolve_workspace_name(workspace) + + if isinstance(workspace, str): + workspace = [workspace] + + workspace_list = [] + + for w in workspace: + workspace_list.append(fabric.resolve_workspace_id(w)) + + client = fabric.PowerBIRestClient() + request_body = {"workspaces": workspace_list} + + response_clause = f"/v1.0/myorg/admin/workspaces/getInfo?lineage={lineage}&datasourceDetails={data_source_details}&datasetSchema={dataset_schema}&datasetExpressions={dataset_expressions}&getArtifactUsers={artifact_users}" + response = client.post(response_clause, json=request_body) + + if response.status_code != 202: + raise FabricHTTPException(response) + scan_id = response.json()["id"] + scan_status = response.json().get("status") + while scan_status not in ["Succeeded", "Failed"]: + time.sleep(1) + response = client.get(f"/v1.0/myorg/admin/workspaces/scanStatus/{scan_id}") + scan_status = response.json().get("status") + if scan_status == "Failed": + raise FabricHTTPException(response) + response = client.get(f"/v1.0/myorg/admin/workspaces/scanResult/{scan_id}") + if response.status_code != 200: + raise FabricHTTPException(response) + + return response.json() + + +def list_datasets() -> pd.DataFrame: + + df = pd.DataFrame( + columns=[ + 
"Dataset Id", + "Dataset Name", + "Web URL", + "Add Rows API Enabled", + "Configured By", + "Is Refreshable", + "Is Effective Identity Required", + "Is Effective Identity Roles Required", + "Target Storage Mode", + "Created Date", + "Content Provider Type", + "Create Report Embed URL", + "QnA Embed URL", + "Upstream Datasets", + "Users", + "Is In Place Sharing Enabled", + "Workspace Id", + "Auto Sync Read Only Replicas", + "Max Read Only Replicas", + ] + ) + + client = fabric.FabricRestClient() + + response = client.get("/v1.0/myorg/admin/datasets") + + if response.status_code != 200: + raise FabricHTTPException(response) + + for v in response.json().get("value", []): + new_data = { + "Dataset Id": v.get("id"), + "Dataset Name": v.get("name"), + "Web URL": v.get("webUrl"), + "Add Rows API Enabled": v.get("addRowsAPIEnabled"), + "Configured By": v.get("configuredBy"), + "Is Refreshable": v.get("isRefreshable"), + "Is Effective Identity Required": v.get("isEffectiveIdentityRequired"), + "Is Effective Identity Roles Required": v.get( + "isEffectiveIdentityRolesRequired" + ), + "Target Storage Mode": v.get("targetStorageMode"), + "Created Date": pd.to_datetime(v.get("createdDate")), + "Content Provider Type": v.get("contentProviderType"), + "Create Report Embed URL": v.get("createReportEmbedURL"), + "QnA Embed URL": v.get("qnaEmbedURL"), + "Upstream Datasets": v.get("upstreamDatasets", []), + "Users": v.get("users", []), + "Is In Place Sharing Enabled": v.get("isInPlaceSharingEnabled"), + "Workspace Id": v.get("workspaceId"), + "Auto Sync Read Only Replicas": v.get("queryScaleOutSettings", {}).get( + "autoSyncReadOnlyReplicas" + ), + "Max Read Only Replicas": v.get("queryScaleOutSettings", {}).get( + "maxReadOnlyReplicas" + ), + } + df = pd.concat([df, pd.DataFrame([new_data])], ignore_index=True) + + bool_cols = [ + "Add Rows API Enabled", + "Is Refreshable", + "Is Effective Identity Required", + "Is Effective Identity Roles Required", + "Is In Place Sharing Enabled", + "Auto Sync Read Only Replicas", + ] + df[bool_cols] = df[bool_cols].astype(bool) + + df["Created Date"] = pd.to_datetime(df["Created Date"]) + df["Max Read Only Replicas"] = df["Max Read Only Replicas"].astype(int) + + return df + + +def list_item_access_details( + item_name: str, type: str, workspace: Optional[str] = None +) -> pd.DataFrame: + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/items/list-item-access-details?tabs=HTTP + + workspace = fabric.resolve_workspace_name(workspace) + workspace_id = fabric.resolve_workspace_id(workspace) + item_id = fabric.resolve_item_id( + item_name=item_name, type=type, workspace=workspace + ) + + df = pd.DataFrame( + columns=[ + "User Id", + "User Name", + "User Type", + "User Principal Name", + "Item Name", + "Item Type", + "Item Id", + "Permissions", + "Additional Permissions", + ] + ) + client = fabric.FabricRestClient() + response = client.get(f"/v1/admin/workspaces/{workspace_id}/items/{item_id}/users") + + if response.status_code != 200: + raise FabricHTTPException(response) + + for v in response.json().get("accessDetails", []): + new_data = { + "User Id": v.get("principal", {}).get("id"), + "User Name": v.get("principal", {}).get("displayName"), + "User Type": v.get("principal", {}).get("type"), + "User Principal Name": v.get("principal", {}) + .get("userDetails", {}) + .get("userPrincipalName"), + "Item Type": v.get("itemAccessDetails", {}).get("type"), + "Permissions": v.get("itemAccessDetails", {}).get("permissions"), + "Additional Permissions": 
v.get("itemAccessDetails", {}).get( + "additionalPermissions" + ), + "Item Name": item_name, + "Item Id": item_id, + } + df = pd.concat([df, pd.DataFrame([new_data])], ignore_index=True) + + return df + + +def list_access_entities( + user_email_address: str, +) -> pd.DataFrame: + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/users/list-access-entities?tabs=HTTP + + df = pd.DataFrame( + columns=[ + "Item Id", + "Item Name", + "Item Type", + "Permissions", + "Additional Permissions", + ] + ) + client = fabric.FabricRestClient() + response = client.get(f"/v1/admin/users/{user_email_address}/access") + + if response.status_code != 200: + raise FabricHTTPException(response) + + responses = pagination(client, response) + + for r in responses: + for v in r.get("accessEntities", []): + new_data = { + "Item Id": v.get("id"), + "Item Name": v.get("displayName"), + "Item Type": v.get("itemAccessDetails", {}).get("type"), + "Permissions": v.get("itemAccessDetails", {}).get("permissions"), + "Additional Permissions": v.get("itemAccessDetails", {}).get( + "additionalPermissions" + ), + } + df = pd.concat([df, pd.DataFrame([new_data])], ignore_index=True) + + return df + + +def list_workspace_access_details( + workspace: Optional[Union[str, UUID]] = None +) -> pd.DataFrame: + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/items/list-items?tabs=HTTP + + workspace_name = fabric.resolve_workspace_name(workspace) + workspace_id = fabric.resolve_workspace_id(workspace_name) + + df = pd.DataFrame( + columns=[ + "User Id", + "User Name", + "User Type", + "Workspace Name", + "Workspace Id", + "Workspace Role", + ] + ) + client = fabric.FabricRestClient() + response = client.get(f"/v1/admin/workspaces/{workspace_id}/users") + if response.status_code != 200: + raise FabricHTTPException(response) + + for v in response.json().get("accessDetails", []): + new_data = { + "User Id": v.get("principal", {}).get("id"), + "User Name": v.get("principal", {}).get("displayName"), + "User Type": v.get("principal", {}).get("type"), + "Workspace Name": workspace_name, + "Workspace Id": workspace_id, + "Workspace Role": v.get("workspaceAccessDetails", {}).get("workspaceRole"), + } + df = pd.concat([df, pd.DataFrame([new_data])], ignore_index=True) + + return df + + +def list_items( + capacity_name: Optional[str] = None, + workspace: Optional[str] = None, + state: Optional[str] = None, + type: Optional[str] = None, +) -> pd.DataFrame: + + url = "/v1/admin/items?" 
+
+
+def list_items(
+    capacity_name: Optional[str] = None,
+    workspace: Optional[str] = None,
+    state: Optional[str] = None,
+    type: Optional[str] = None,
+) -> pd.DataFrame:
+
+    url = "/v1/admin/items?"
+
+    df = pd.DataFrame(
+        columns=[
+            "Item Id",
+            "Item Name",
+            "Type",
+            "Description",
+            "State",
+            "Last Updated Date",
+            "Creator Principal Id",
+            "Creator Principal Display Name",
+            "Creator Principal Type",
+            "Creator User Principal Name",
+            "Workspace Id",
+            "Capacity Id",
+        ]
+    )
+
+    if workspace is not None:
+        workspace = fabric.resolve_workspace_name(workspace)
+        workspace_id = fabric.resolve_workspace_id(workspace)
+        url += f"workspaceId={workspace_id}&"
+    if capacity_name is not None:
+        dfC = list_capacities()
+        dfC_filt = dfC[dfC["Capacity Name"] == capacity_name]
+        if len(dfC_filt) == 0:
+            raise ValueError(
+                f"{icons.red_dot} The '{capacity_name}' capacity does not exist."
+            )
+        capacity_id = dfC_filt["Capacity Id"].iloc[0]
+        url += f"capacityId={capacity_id}&"
+    if state is not None:
+        url += f"state={state}&"
+    if type is not None:
+        url += f"type={type}&"
+
+    if url.endswith("?") or url.endswith("&"):
+        url = url[:-1]
+
+    client = fabric.FabricRestClient()
+    response = client.get(url)
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+
+    responses = pagination(client, response)
+
+    for r in responses:
+        for v in r.get("itemEntities", []):
+            new_data = {
+                "Item Id": v.get("id"),
+                "Item Name": v.get("name"),
+                "Type": v.get("type"),
+                "Description": v.get("description"),
+                "State": v.get("state"),
+                "Last Updated Date": v.get("lastUpdatedDate"),
+                "Creator Principal Id": v.get("creatorPrincipal", {}).get("id"),
+                "Creator Principal Display Name": v.get("creatorPrincipal", {}).get(
+                    "displayName"
+                ),
+                "Creator Principal Type": v.get("creatorPrincipal", {}).get("type"),
+                "Creator User Principal Name": v.get("creatorPrincipal", {})
+                .get("userDetails", {})
+                .get("userPrincipalName"),
+                "Workspace Id": v.get("workspaceId"),
+                "Capacity Id": v.get("capacityId"),
+            }
+            df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True)
+
+    return df
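+
+# Example usage sketch for `list_items`: all active notebooks on one
+# (hypothetical) capacity:
+#
+#     dfI = list_items(capacity_name="MyCapacity", state="Active", type="Notebook")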
+ """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/list-domains?tabs=HTTP + + df = pd.DataFrame( + columns=[ + "Domain ID", + "Domain Name", + "Description", + "Parent Domain ID", + "Contributors Scope", + ] + ) + + client = fabric.FabricRestClient() + url = "/v1/admin/domains" + if non_empty_only: + url = f"{url}?nonEmptyOnly=True" + response = client.get(url) + + if response.status_code != 200: + raise FabricHTTPException(response) + + for v in response.json().get("domains", []): + new_data = { + "Domain ID": v.get("id"), + "Domain Name": v.get("displayName"), + "Description": v.get("description"), + "Parent Domain ID": v.get("parentDomainId"), + "Contributors Scope": v.get("contributorsScope"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df + + +def list_domain_workspaces(domain_name: str) -> pd.DataFrame: + """ + Shows a list of workspaces within the domain. + + Parameters + ---------- + domain_name : str + The domain name. + + Returns + ------- + pandas.DataFrame + A pandas dataframe showing a list of workspaces within the domain. + """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/list-domain-workspaces?tabs=HTTP + + domain_id = resolve_domain_id(domain_name) + + df = pd.DataFrame(columns=["Workspace ID", "Workspace Name"]) + + client = fabric.FabricRestClient() + response = client.get(f"/v1/admin/domains/{domain_id}/workspaces") + + if response.status_code != 200: + raise FabricHTTPException(response) + + for v in response.json().get("value", []): + new_data = { + "Workspace ID": v.get("id"), + "Workspace Name": v.get("displayName"), + } + df = pd.concat([df, pd.DataFrame(new_data, index=[0])], ignore_index=True) + + return df + + +def create_domain( + domain_name: str, + description: Optional[str] = None, + parent_domain_name: Optional[str] = None, +): + """ + Creates a new domain. + + Parameters + ---------- + domain_name : str + The domain name. + description : str, default=None + The domain description. + parent_domain_name : str, default=None + The parent domain name. + """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/create-domain?tabs=HTTP + + if parent_domain_name is not None: + parent_domain_id = resolve_domain_id(parent_domain_name) + + payload = {} + payload["displayName"] = domain_name + if description is not None: + payload["description"] = description + if parent_domain_name is not None: + payload["parentDomainId"] = parent_domain_id + + client = fabric.FabricRestClient() + response = client.post("/v1/admin/domains", json=payload) + + if response.status_code != 201: + raise FabricHTTPException(response) + + print(f"{icons.green_dot} The '{domain_name}' domain has been created.") + + +def delete_domain(domain_name: str): + """ + Deletes a domain. + + Parameters + ---------- + domain_name : str + The domain name. + """ + + # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/delete-domain?tabs=HTTP + + domain_id = resolve_domain_id(domain_name) + + client = fabric.FabricRestClient() + response = client.delete(f"/v1/admin/domains/{domain_id}") + + if response.status_code != 200: + raise FabricHTTPException(response) + + print(f"{icons.green_dot} The '{domain_name}' domain has been deleted.") + + +def update_domain( + domain_name: str, + description: Optional[str] = None, + contributors_scope: Optional[str] = None, +): + """ + Updates a domain's properties. + + Parameters + ---------- + domain_name : str + The domain name. 
+
+
+def update_domain(
+    domain_name: str,
+    description: Optional[str] = None,
+    contributors_scope: Optional[str] = None,
+):
+    """
+    Updates a domain's properties.
+
+    Parameters
+    ----------
+    domain_name : str
+        The domain name.
+    description : str, default=None
+        The domain description.
+    contributors_scope : str, default=None
+        The domain `contributors scope <https://learn.microsoft.com/rest/api/fabric/admin/domains/update-domain?tabs=HTTP#contributorsscopetype>`_.
+    """
+
+    # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/update-domain?tabs=HTTP
+
+    contributors_scopes = ["AdminsOnly", "AllTenant", "SpecificUsersAndGroups"]
+
+    # Validate the scope only when one was provided; it is an optional parameter.
+    if contributors_scope is not None and contributors_scope not in contributors_scopes:
+        raise ValueError(
+            f"{icons.red_dot} Invalid contributors scope. Valid options: {contributors_scopes}."
+        )
+
+    domain_id = resolve_domain_id(domain_name)
+
+    payload = {}
+    payload["displayName"] = domain_name
+
+    if description is not None:
+        payload["description"] = description
+    if contributors_scope is not None:
+        payload["contributorsScope"] = contributors_scope
+
+    client = fabric.FabricRestClient()
+    response = client.patch(f"/v1/admin/domains/{domain_id}", json=payload)
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+
+    print(f"{icons.green_dot} The '{domain_name}' domain has been updated.")
+
+
+def assign_domain_workspaces_by_capacities(
+    domain_name: str, capacity_names: str | List[str]
+):
+    """
+    Assigns all workspaces that reside on the specified capacities to the specified domain.
+
+    Parameters
+    ----------
+    domain_name : str
+        The domain name.
+    capacity_names : str | List[str]
+        The capacity names.
+    """
+
+    # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/assign-domain-workspaces-by-capacities?tabs=HTTP
+
+    from sempy_labs.admin import list_capacities
+
+    domain_id = resolve_domain_id(domain_name)
+
+    if isinstance(capacity_names, str):
+        capacity_names = [capacity_names]
+
+    dfC = list_capacities()
+
+    # Check for invalid capacities
+    invalid_capacities = [
+        name for name in capacity_names if name not in dfC["Display Name"].values
+    ]
+
+    if len(invalid_capacities) == 1:
+        raise ValueError(
+            f"{icons.red_dot} The {invalid_capacities} capacity is not valid."
+        )
+    elif len(invalid_capacities) > 1:
+        raise ValueError(
+            f"{icons.red_dot} The {invalid_capacities} capacities are not valid."
+        )
+
+    # Get list of capacity Ids for the payload
+    dfC_filt = dfC[dfC["Display Name"].isin(capacity_names)]
+    capacity_list = list(dfC_filt["Id"].str.upper())
+
+    payload = {"capacitiesIds": capacity_list}
+
+    client = fabric.FabricRestClient()
+    response = client.post(
+        f"/v1/admin/domains/{domain_id}/assignWorkspacesByCapacities",
+        json=payload,
+    )
+
+    lro(client, response)
+
+    print(
+        f"{icons.green_dot} The workspaces in the {capacity_names} capacities have been assigned to the '{domain_name}' domain."
+    )
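+
+# Example usage sketch: move every workspace on two (hypothetical) capacities
+# into the 'Finance' domain in a single call:
+#
+#     assign_domain_workspaces_by_capacities(
+#         domain_name="Finance",
+#         capacity_names=["CapacityA", "CapacityB"],
+#     )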
+
+
+def assign_domain_workspaces(domain_name: str, workspace_names: str | List[str]):
+    """
+    Assigns workspaces to the specified domain by workspace.
+
+    Parameters
+    ----------
+    domain_name : str
+        The domain name.
+    workspace_names : str | List[str]
+        The Fabric workspace(s).
+    """
+
+    # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/assign-domain-workspaces-by-ids?tabs=HTTP
+
+    domain_id = resolve_domain_id(domain_name=domain_name)
+
+    if isinstance(workspace_names, str):
+        workspace_names = [workspace_names]
+
+    dfW = fabric.list_workspaces()
+
+    # Check for invalid workspaces
+    invalid_workspaces = [
+        name for name in workspace_names if name not in dfW["Name"].values
+    ]
+
+    if len(invalid_workspaces) == 1:
+        raise ValueError(
+            f"{icons.red_dot} The {invalid_workspaces} workspace is not valid."
+        )
+    elif len(invalid_workspaces) > 1:
+        raise ValueError(
+            f"{icons.red_dot} The {invalid_workspaces} workspaces are not valid."
+        )
+
+    dfW_filt = dfW[dfW["Name"].isin(workspace_names)]
+    workspace_list = list(dfW_filt["Id"])
+
+    payload = {"workspacesIds": workspace_list}
+
+    client = fabric.FabricRestClient()
+    response = client.post(
+        f"/v1/admin/domains/{domain_id}/assignWorkspaces",
+        json=payload,
+    )
+
+    lro(client, response)
+
+    print(
+        f"{icons.green_dot} The {workspace_names} workspaces have been assigned to the '{domain_name}' domain."
+    )
+
+
+def unassign_all_domain_workspaces(domain_name: str):
+    """
+    Unassigns all workspaces from the specified domain.
+
+    Parameters
+    ----------
+    domain_name : str
+        The domain name.
+    """
+
+    # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/unassign-all-domain-workspaces?tabs=HTTP
+
+    domain_id = resolve_domain_id(domain_name=domain_name)
+
+    client = fabric.FabricRestClient()
+    response = client.post(f"/v1/admin/domains/{domain_id}/unassignAllWorkspaces")
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} All workspaces assigned to the '{domain_name}' domain have been unassigned."
+    )
+
+
+def unassign_domain_workspaces(domain_name: str, workspace_names: str | List[str]):
+    """
+    Unassigns workspaces from the specified domain by workspace.
+
+    Parameters
+    ----------
+    domain_name : str
+        The domain name.
+    workspace_names : str | List[str]
+        The Fabric workspace(s).
+    """
+
+    # https://learn.microsoft.com/en-us/rest/api/fabric/admin/domains/unassign-domain-workspaces-by-ids?tabs=HTTP
+
+    domain_id = resolve_domain_id(domain_name=domain_name)
+
+    if isinstance(workspace_names, str):
+        workspace_names = [workspace_names]
+
+    dfW = fabric.list_workspaces()
+
+    # Check for invalid workspaces
+    invalid_workspaces = [
+        name for name in workspace_names if name not in dfW["Name"].values
+    ]
+
+    if len(invalid_workspaces) == 1:
+        raise ValueError(
+            f"{icons.red_dot} The {invalid_workspaces} workspace is not valid."
+        )
+    elif len(invalid_workspaces) > 1:
+        raise ValueError(
+            f"{icons.red_dot} The {invalid_workspaces} workspaces are not valid."
+        )
+
+    dfW_filt = dfW[dfW["Name"].isin(workspace_names)]
+    workspace_list = list(dfW_filt["Id"])
+
+    payload = {"workspacesIds": workspace_list}
+    client = fabric.FabricRestClient()
+    response = client.post(
+        f"/v1/admin/domains/{domain_id}/unassignWorkspaces", json=payload
+    )
+
+    if response.status_code != 200:
+        raise FabricHTTPException(response)
+    print(
+        f"{icons.green_dot} The {workspace_names} workspaces assigned to the '{domain_name}' domain have been unassigned."
+    )
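+
+# Example usage sketch: assign two (hypothetical) workspaces to a domain and
+# later unassign them:
+#
+#     assign_domain_workspaces("Finance", ["Sales Analytics", "Finance Reports"])
+#     unassign_domain_workspaces("Finance", ["Sales Analytics"])
+#     unassign_all_domain_workspaces("Finance")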