diff --git a/src/sempy_labs/_helper_functions.py b/src/sempy_labs/_helper_functions.py index 01495de9..13edf392 100644 --- a/src/sempy_labs/_helper_functions.py +++ b/src/sempy_labs/_helper_functions.py @@ -160,14 +160,34 @@ def resolve_report_name(report_id: UUID, workspace: Optional[str] = None) -> str return obj -def resolve_dataset_id(dataset: str, workspace: Optional[str] = None) -> UUID: +def resolve_dataset_name_and_id( + dataset: str | UUID, workspace: Optional[str] = None +) -> Tuple[str, UUID]: + + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + + if _is_valid_uuid(dataset): + dataset_id = dataset + dataset_name = fabric.resolve_item_name( + item_id=dataset_id, type="SemanticModel", workspace=workspace_id + ) + else: + dataset_name = dataset + dataset_id = fabric.resolve_item_id( + item_name=dataset, type="SemanticModel", workspace=workspace_id + ) + + return dataset_name, dataset_id + + +def resolve_dataset_id(dataset: str | UUID, workspace: Optional[str] = None) -> UUID: """ Obtains the ID of the semantic model. Parameters ---------- - dataset : str - The name of the semantic model. + dataset : str | UUID + The name or ID of the semantic model. workspace : str, default=None The Fabric workspace name. Defaults to None which resolves to the workspace of the attached lakehouse @@ -179,15 +199,14 @@ def resolve_dataset_id(dataset: str, workspace: Optional[str] = None) -> UUID: The ID of the semantic model. """ - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) - - obj = fabric.resolve_item_id( - item_name=dataset, type="SemanticModel", workspace=workspace - ) + if _is_valid_uuid(dataset): + dataset_id = dataset + else: + dataset_id = fabric.resolve_item_id( + item_name=dataset, type="SemanticModel", workspace=workspace + ) - return obj + return dataset_id def resolve_dataset_name(dataset_id: UUID, workspace: Optional[str] = None) -> str: @@ -1167,20 +1186,20 @@ def _make_list_unique(my_list): def _get_partition_map(dataset: str, workspace: Optional[str] = None) -> pd.DataFrame: - if workspace is None: - workspace = fabric.resolve_workspace_name() + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) partitions = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" select [ID] AS [PartitionID], [TableID], [Name] AS [PartitionName] from $system.tmschema_partitions """, ) tables = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" select [ID] AS [TableID], [Name] AS [TableName] from $system.tmschema_tables """, diff --git a/src/sempy_labs/_list_functions.py b/src/sempy_labs/_list_functions.py index abef0e46..f4157ccd 100644 --- a/src/sempy_labs/_list_functions.py +++ b/src/sempy_labs/_list_functions.py @@ -7,23 +7,25 @@ pagination, resolve_item_type, format_dax_object_name, + resolve_dataset_name_and_id, ) import pandas as pd from typing import Optional import sempy_labs._icons as icons from sempy.fabric.exceptions import FabricHTTPException +from uuid import UUID def get_object_level_security( - dataset: str, workspace: Optional[str] = None + dataset: str | UUID, workspace: Optional[str] = None ) -> pd.DataFrame: """ Shows the object level security for the semantic model. 
Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. workspace : str, default=None The Fabric workspace name. Defaults to None which resolves to the workspace of the attached lakehouse @@ -37,12 +39,13 @@ def get_object_level_security( from sempy_labs.tom import connect_semantic_model - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) df = pd.DataFrame(columns=["Role Name", "Object Type", "Table Name", "Object Name"]) with connect_semantic_model( - dataset=dataset, readonly=True, workspace=workspace + dataset=dataset_id, readonly=True, workspace=workspace_id ) as tom: for r in tom.model.Roles: @@ -82,15 +85,15 @@ def get_object_level_security( def list_tables( - dataset: str, workspace: Optional[str] = None, extended: bool = False + dataset: str | UUID, workspace: Optional[str] = None, extended: bool = False ) -> pd.DataFrame: """ Shows a semantic model's tables and their properties. Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. workspace : str, default=None The Fabric workspace name. Defaults to None which resolves to the workspace of the attached lakehouse @@ -106,7 +109,8 @@ def list_tables( from sempy_labs.tom import connect_semantic_model - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) df = pd.DataFrame( columns=[ @@ -121,20 +125,20 @@ def list_tables( ) with connect_semantic_model( - dataset=dataset, workspace=workspace, readonly=True + dataset=dataset_id, workspace=workspace_id, readonly=True ) as tom: if extended: dict_df = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" EVALUATE SELECTCOLUMNS(FILTER(INFO.STORAGETABLECOLUMNS(), [COLUMN_TYPE] = "BASIC_DATA"),[DIMENSION_NAME],[DICTIONARY_SIZE]) """, ) dict_sum = dict_df.groupby("[DIMENSION_NAME]")["[DICTIONARY_SIZE]"].sum() data = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string="""EVALUATE SELECTCOLUMNS(INFO.STORAGETABLECOLUMNSEGMENTS(),[TABLE_ID],[DIMENSION_NAME],[USED_SIZE])""", ) data_sum = ( @@ -162,8 +166,8 @@ def list_tables( .sum() ) rc = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" SELECT [DIMENSION_NAME],[ROWS_COUNT] FROM $SYSTEM.DISCOVER_STORAGE_TABLES WHERE RIGHT ( LEFT ( TABLE_ID, 2 ), 1 ) <> '$' @@ -850,15 +854,15 @@ def update_item( def list_relationships( - dataset: str, workspace: Optional[str] = None, extended: bool = False + dataset: str | UUID, workspace: Optional[str] = None, extended: bool = False ) -> pd.DataFrame: """ Shows a semantic model's relationships and their properties. Parameters ---------- - dataset: str - Name of the semantic model. + dataset: str | UUID + Name or UUID of the semantic model. workspace : str, default=None The Fabric workspace name. Defaults to None which resolves to the workspace of the attached lakehouse @@ -872,17 +876,18 @@ def list_relationships( A pandas dataframe showing the object level security for the semantic model. 
""" - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) - dfR = fabric.list_relationships(dataset=dataset, workspace=workspace) + dfR = fabric.list_relationships(dataset=dataset_id, workspace=workspace_id) dfR["From Object"] = format_dax_object_name(dfR["From Table"], dfR["From Column"]) dfR["To Object"] = format_dax_object_name(dfR["To Table"], dfR["To Column"]) if extended: # Used to map the Relationship IDs rel = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" SELECT [ID] AS [RelationshipID] @@ -893,8 +898,8 @@ def list_relationships( # USED_SIZE shows the Relationship Size where TABLE_ID starts with R$ cs = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" SELECT [TABLE_ID] diff --git a/src/sempy_labs/_model_bpa.py b/src/sempy_labs/_model_bpa.py index 9f05b532..69623060 100644 --- a/src/sempy_labs/_model_bpa.py +++ b/src/sempy_labs/_model_bpa.py @@ -10,9 +10,10 @@ create_relationship_name, save_as_delta_table, resolve_workspace_capacity, - resolve_dataset_id, + resolve_dataset_name_and_id, get_language_codes, _get_max_run_id, + resolve_workspace_name_and_id, ) from sempy_labs.lakehouse import get_lakehouse_tables, lakehouse_attached from sempy_labs.tom import connect_semantic_model @@ -23,11 +24,12 @@ from pyspark.sql.functions import col, flatten from pyspark.sql.types import StructType, StructField, StringType import os +from uuid import UUID @log def run_model_bpa( - dataset: str, + dataset: str | UUID, rules: Optional[pd.DataFrame] = None, workspace: Optional[str] = None, export: bool = False, @@ -41,8 +43,8 @@ def run_model_bpa( Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. rules : pandas.DataFrame, default=None A pandas dataframe containing rules to be evaluated. workspace : str, default=None @@ -105,7 +107,10 @@ def map_language(language, language_list): if language is not None: language = map_language(language, language_list) - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id( + dataset, workspace=workspace_id + ) if language is not None and language not in language_list: print( @@ -113,7 +118,7 @@ def map_language(language, language_list): ) with connect_semantic_model( - dataset=dataset, workspace=workspace, readonly=True + dataset=dataset_id, workspace=workspace_id, readonly=True ) as tom: if extended: @@ -122,7 +127,7 @@ def map_language(language, language_list): # Do not run BPA for models with no tables if tom.model.Tables.Count == 0: print( - f"{icons.warning} The '{dataset}' semantic model within the '{workspace}' workspace has no tables and therefore there are no valid BPA results." + f"{icons.warning} The '{dataset_name}' semantic model within the '{workspace_name}' workspace has no tables and therefore there are no valid BPA results." 
) finalDF = pd.DataFrame( columns=[ @@ -136,7 +141,9 @@ def map_language(language, language_list): ] ) else: - dep = get_model_calc_dependencies(dataset=dataset, workspace=workspace) + dep = get_model_calc_dependencies( + dataset=dataset_id, workspace=workspace_id + ) def translate_using_po(rule_file): current_dir = os.path.dirname(os.path.abspath(__file__)) @@ -382,20 +389,19 @@ def translate_using_spark(rule_file): runId = max_run_id + 1 now = datetime.datetime.now() - dfD = fabric.list_datasets(workspace=workspace, mode="rest") - dfD_filt = dfD[dfD["Dataset Name"] == dataset] + dfD = fabric.list_datasets(workspace=workspace_id, mode="rest") + dfD_filt = dfD[dfD["Dataset Id"] == dataset_id] configured_by = dfD_filt["Configured By"].iloc[0] - capacity_id, capacity_name = resolve_workspace_capacity(workspace=workspace) + capacity_id, capacity_name = resolve_workspace_capacity(workspace=workspace_id) dfExport["Capacity Name"] = capacity_name dfExport["Capacity Id"] = capacity_id - dfExport["Workspace Name"] = workspace - dfExport["Workspace Id"] = fabric.resolve_workspace_id(workspace) - dfExport["Dataset Name"] = dataset - dfExport["Dataset Id"] = resolve_dataset_id(dataset, workspace) + dfExport["Workspace Name"] = workspace_name + dfExport["Workspace Id"] = workspace_id + dfExport["Dataset Name"] = dataset_name + dfExport["Dataset Id"] = dataset_id dfExport["Configured By"] = configured_by dfExport["Timestamp"] = now dfExport["RunId"] = runId - dfExport["Configured By"] = configured_by dfExport["RunId"] = dfExport["RunId"].astype("int") dfExport = dfExport[list(icons.bpa_schema.keys())] diff --git a/src/sempy_labs/_model_bpa_bulk.py b/src/sempy_labs/_model_bpa_bulk.py index 41ff4b5f..7e0262d5 100644 --- a/src/sempy_labs/_model_bpa_bulk.py +++ b/src/sempy_labs/_model_bpa_bulk.py @@ -119,16 +119,16 @@ def run_model_bpa_bulk( dfD_filt = dfD[~dfD["Dataset Name"].isin(skip_models)] if len(dfD_filt) > 0: - for i2, r2 in dfD_filt.iterrows(): + for _, r2 in dfD_filt.iterrows(): + dataset_id = r2["Dataset Id"] dataset_name = r2["Dataset Name"] config_by = r2["Configured By"] - dataset_id = r2["Dataset Id"] print( f"{icons.in_progress} Collecting Model BPA stats for the '{dataset_name}' semantic model within the '{wksp}' workspace." ) try: bpa_df = run_model_bpa( - dataset=dataset_name, + dataset=dataset_id, workspace=wksp, language=language, return_dataframe=True, diff --git a/src/sempy_labs/_model_dependencies.py b/src/sempy_labs/_model_dependencies.py index 6b632826..4745826b 100644 --- a/src/sempy_labs/_model_dependencies.py +++ b/src/sempy_labs/_model_dependencies.py @@ -1,10 +1,15 @@ import sempy.fabric as fabric import pandas as pd -from sempy_labs._helper_functions import format_dax_object_name +from sempy_labs._helper_functions import ( + format_dax_object_name, + resolve_dataset_name_and_id, + resolve_workspace_name_and_id, +) import sempy_labs._icons as icons from typing import Any, Dict, Optional from anytree import Node, RenderTree from sempy._utils._log import log +from uuid import UUID @log @@ -139,15 +144,15 @@ def get_measure_dependencies( @log def get_model_calc_dependencies( - dataset: str, workspace: Optional[str] = None + dataset: str | UUID, workspace: Optional[str] = None ) -> pd.DataFrame: """ Shows all dependencies for all objects in a semantic model. Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. workspace : str, default=None The Fabric workspace name. 
Defaults to None which resolves to the workspace of the attached lakehouse @@ -159,10 +164,11 @@ def get_model_calc_dependencies( Shows all dependencies for all objects in the semantic model. """ - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) dep = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" SELECT [TABLE] AS [Table Name], diff --git a/src/sempy_labs/_refresh_semantic_model.py b/src/sempy_labs/_refresh_semantic_model.py index 028090fe..28eaa38b 100644 --- a/src/sempy_labs/_refresh_semantic_model.py +++ b/src/sempy_labs/_refresh_semantic_model.py @@ -5,6 +5,7 @@ resolve_workspace_name_and_id, _get_partition_map, _process_and_display_chart, + resolve_dataset_name_and_id, ) from typing import Any, List, Optional, Union from sempy._utils._log import log @@ -14,11 +15,12 @@ import warnings import ipywidgets as widgets import json +from uuid import UUID @log def refresh_semantic_model( - dataset: str, + dataset: str | UUID, tables: Optional[Union[str, List[str]]] = None, partitions: Optional[Union[str, List[str]]] = None, refresh_type: str = "full", @@ -34,8 +36,8 @@ def refresh_semantic_model( Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. tables : str, List[str], default=None A string or a list of tables to refresh. partitions: str, List[str], default=None @@ -65,7 +67,8 @@ def refresh_semantic_model( If 'visualize' is set to True, returns a pandas dataframe showing the SSAS trace output used to generate the visualization. """ - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) if isinstance(tables, str): tables = [tables] @@ -118,11 +121,11 @@ def refresh_and_trace_dataset( def extract_failure_error(): error_messages = [] combined_messages = "" - final_message = f"{icons.red_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has failed." + final_message = f"{icons.red_dot} The refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace has failed." 
for _, r in fabric.get_refresh_execution_details( refresh_request_id=request_id, - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, ).messages.iterrows(): error_messages.append(f"{r['Type']}: {r['Message']}") @@ -135,8 +138,8 @@ def extract_failure_error(): # Function to perform dataset refresh def refresh_dataset(): return fabric.refresh_dataset( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, refresh_type=refresh_type, retry_count=retry_count, apply_refresh_policy=apply_refresh_policy, @@ -147,7 +150,9 @@ def refresh_dataset(): def check_refresh_status(request_id): request_details = fabric.get_refresh_execution_details( - dataset=dataset, refresh_request_id=request_id, workspace=workspace + dataset=dataset_id, + refresh_request_id=request_id, + workspace=workspace_id, ) return request_details.status @@ -181,7 +186,7 @@ def display_trace_logs(trace, partition_map, widget, title, stop=False): if not visualize: request_id = refresh_dataset() print( - f"{icons.in_progress} Refresh of the '{dataset}' semantic model within the '{workspace}' workspace is in progress..." + f"{icons.in_progress} Refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace is in progress..." ) # Monitor refresh progress and handle tracing if visualize is enabled @@ -190,7 +195,7 @@ def display_trace_logs(trace, partition_map, widget, title, stop=False): widget = widgets.Output() with fabric.create_trace_connection( - dataset=dataset, workspace=workspace + dataset=dataset_id, workspace=workspace_id ) as trace_connection: with trace_connection.create_trace(icons.refresh_event_schema) as trace: trace.start() @@ -205,7 +210,7 @@ def display_trace_logs(trace, partition_map, widget, title, stop=False): raise ValueError(extract_failure_error()) elif status == "Cancelled": print( - f"{icons.yellow_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has been cancelled." + f"{icons.yellow_dot} The refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace has been cancelled." ) return @@ -232,7 +237,7 @@ def display_trace_logs(trace, partition_map, widget, title, stop=False): ) print( - f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset}' semantic model within the '{workspace}' workspace is complete." + f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset_name}' semantic model within the '{workspace_name}' workspace is complete." ) return final_df @@ -246,14 +251,14 @@ def display_trace_logs(trace, partition_map, widget, title, stop=False): raise ValueError(extract_failure_error()) elif status == "Cancelled": print( - f"{icons.yellow_dot} The refresh of the '{dataset}' semantic model within the '{workspace}' workspace has been cancelled." + f"{icons.yellow_dot} The refresh of the '{dataset_name}' semantic model within the '{workspace_name}' workspace has been cancelled." ) return time.sleep(3) print( - f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset}' semantic model within the '{workspace}' workspace is complete." + f"{icons.green_dot} Refresh '{refresh_type}' of the '{dataset_name}' semantic model within the '{workspace_name}' workspace is complete." 
) final_output = refresh_and_trace_dataset( diff --git a/src/sempy_labs/directlake/_dl_helper.py b/src/sempy_labs/directlake/_dl_helper.py index faf534da..a5395529 100644 --- a/src/sempy_labs/directlake/_dl_helper.py +++ b/src/sempy_labs/directlake/_dl_helper.py @@ -10,19 +10,21 @@ resolve_dataset_id, resolve_lakehouse_name, _convert_data_type, + resolve_dataset_name_and_id, + resolve_workspace_name_and_id, ) def check_fallback_reason( - dataset: str, workspace: Optional[str] = None + dataset: str | UUID, workspace: Optional[str] = None ) -> pd.DataFrame: """ Shows the reason a table in a Direct Lake semantic model would fallback to DirectQuery. Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. workspace : str, default=None The Fabric workspace name. Defaults to None which resolves to the workspace of the attached lakehouse @@ -35,19 +37,22 @@ def check_fallback_reason( """ from sempy_labs.tom import connect_semantic_model - workspace = fabric.resolve_workspace_name(workspace) + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id( + dataset, workspace=workspace_id + ) with connect_semantic_model( - dataset=dataset, workspace=workspace, readonly=True + dataset=dataset_id, workspace=workspace_id, readonly=True ) as tom: if not tom.is_direct_lake(): raise ValueError( - f"{icons.red_dot} The '{dataset}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." + f"{icons.red_dot} The '{dataset_name}' semantic model is not in Direct Lake. This function is only applicable to Direct Lake semantic models." ) df = fabric.evaluate_dax( - dataset=dataset, - workspace=workspace, + dataset=dataset_id, + workspace=workspace_id, dax_string=""" SELECT [TableName] AS [Table Name],[FallbackReason] AS [FallbackReasonID] FROM $SYSTEM.TMSCHEMA_DELTA_TABLE_METADATA_STORAGES diff --git a/src/sempy_labs/tom/_model.py b/src/sempy_labs/tom/_model.py index d5ff4d00..97582a5c 100644 --- a/src/sempy_labs/tom/_model.py +++ b/src/sempy_labs/tom/_model.py @@ -7,6 +7,8 @@ format_dax_object_name, generate_guid, _make_list_unique, + resolve_dataset_name_and_id, + resolve_workspace_name_and_id, ) from sempy_labs._list_functions import list_relationships from sempy_labs._refresh_semantic_model import refresh_semantic_model @@ -17,6 +19,7 @@ import sempy_labs._icons as icons from sempy.fabric.exceptions import FabricHTTPException import ast +from uuid import UUID if TYPE_CHECKING: import Microsoft.AnalysisServices.Tabular @@ -27,27 +30,33 @@ class TOMWrapper: """ Convenience wrapper around the TOM object model for a semantic model. Always use the connect_semantic_model function to make sure the TOM object is initialized correctly. - `XMLA read/write endpoints `_ must - be enabled if setting the readonly parameter to False. + `XMLA read/write endpoints `_ must be enabled if setting the readonly parameter to False. 
""" - _dataset: str - _workspace: str + _dataset_id: UUID + _dataset_name: str + _workspace_id: UUID + _workspace_name: str _readonly: bool _tables_added: List[str] _table_map = dict _column_map = dict def __init__(self, dataset, workspace, readonly): - self._dataset = dataset - self._workspace = workspace + + (workspace_name, workspace_id) = resolve_workspace_name_and_id(workspace) + (dataset_name, dataset_id) = resolve_dataset_name_and_id(dataset, workspace_id) + self._dataset_id = dataset_id + self._dataset_name = dataset_name + self._workspace_name = workspace_name + self._workspace_id = workspace_id self._readonly = readonly self._tables_added = [] self._tom_server = fabric.create_tom_server( - readonly=readonly, workspace=workspace + readonly=readonly, workspace=workspace_id ) - self.model = self._tom_server.Databases.GetByName(dataset).Model + self.model = self._tom_server.Databases[dataset_id].Model self._table_map = {} self._column_map = {} @@ -2160,7 +2169,9 @@ def mark_as_date_table( ) """ df = fabric.evaluate_dax( - dataset=self._dataset, workspace=self._workspace, dax_string=dax_query + dataset=self._dataset_id, + workspace=self._workspace_id, + dax_string=dax_query, ) value = df["[1]"].iloc[0] if value != "1": @@ -2424,7 +2435,7 @@ def set_kpi( ) except Exception: raise ValueError( - f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." + f"{icons.red_dot} The '{measure_name}' measure does not exist in the '{self._dataset_name}' semantic model within the '{self._workspace_name}'." ) graphics = [ @@ -2467,7 +2478,7 @@ def set_kpi( ) except Exception: raise ValueError( - f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}'." + f"{icons.red_dot} The '{target}' measure does not exist in the '{self._dataset_name}' semantic model within the '{self._workspace_name}'." ) if measure_target: @@ -2793,7 +2804,7 @@ def add_field_parameter( success = True if not success: raise ValueError( - f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset}' semantic model." + f"{icons.red_dot} The '{obj}' object was not found in the '{self._dataset_name}' semantic model." 
) else: i += 1 @@ -2881,19 +2892,19 @@ def set_vertipaq_annotations(self): from sempy_labs._list_functions import list_tables dfT = list_tables( - dataset=self._dataset, workspace=self._workspace, extended=True + dataset=self._dataset_id, workspace=self._workspace_id, extended=True ) dfC = fabric.list_columns( - dataset=self._dataset, workspace=self._workspace, extended=True + dataset=self._dataset_id, workspace=self._workspace_id, extended=True ) dfP = fabric.list_partitions( - dataset=self._dataset, workspace=self._workspace, extended=True + dataset=self._dataset_id, workspace=self._workspace_id, extended=True ) dfH = fabric.list_hierarchies( - dataset=self._dataset, workspace=self._workspace, extended=True + dataset=self._dataset_id, workspace=self._workspace_id, extended=True ) dfR = list_relationships( - dataset=self._dataset, workspace=self._workspace, extended=True + dataset=self._dataset_id, workspace=self._workspace_id, extended=True ) for t in self.model.Tables: @@ -3338,7 +3349,9 @@ def is_direct_lake_using_view(self): usingView = False if self.is_direct_lake(): - df = check_fallback_reason(dataset=self._dataset, workspace=self._workspace) + df = check_fallback_reason( + dataset=self._dataset_id, workspace=self._workspace_id + ) df_filt = df[df["FallbackReasonID"] == 2] if len(df_filt) > 0: @@ -3385,7 +3398,7 @@ def show_incremental_refresh_policy(self, table_name: str): if rp is None: print( - f"{icons.yellow_dot} The '{table_name}' table in the '{self._dataset}' semantic model within the '{self._workspace}' workspace does not have an incremental refresh policy." + f"{icons.yellow_dot} The '{table_name}' table in the '{self._dataset_name}' semantic model within the '{self._workspace_name}' workspace does not have an incremental refresh policy." ) else: print(f"Table Name: {table_name}") @@ -3884,14 +3897,14 @@ def add_time_intelligence( if table_name is None: raise ValueError( - f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." + f"{icons.red_dot} The '{measure_name}' is not a valid measure in the '{self._dataset_name}' semantic model within the '{self._workspace_name}' workspace." ) table_name = matching_measures[0] # Validate date table if not self.is_date_table(date_table): raise ValueError( - f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset}' wemantic model within the '{self._workspace}' workspace." + f"{icons.red_dot} The '{date_table}' table is not a valid date table in the '{self._dataset_name}' semantic model within the '{self._workspace_name}' workspace." ) # Extract date key from date table @@ -3903,7 +3916,7 @@ def add_time_intelligence( if not matching_columns: raise ValueError( - f"{icons.red_dot} The '{date_table}' table does not have a date key column in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." + f"{icons.red_dot} The '{date_table}' table does not have a date key column in the '{self._dataset_name}' semantic model within the '{self._workspace_name}' workspace."
) date_key = matching_columns[0] @@ -4383,7 +4396,6 @@ def generate_measure_descriptions( if isinstance(measure_name, str): measure_name = [measure_name] - workspace_id = fabric.resolve_workspace_id(self._workspace) client = fabric.FabricRestClient() if len(measure_name) > max_batch_size: @@ -4402,7 +4414,7 @@ def generate_measure_descriptions( "modelItems": [], }, }, - "workspaceId": workspace_id, + "workspaceId": self._workspace_id, "artifactInfo": {"artifactType": "SemanticModel"}, } for m_name in measure_list: @@ -4413,7 +4425,7 @@ def generate_measure_descriptions( ) if t_name is None: raise ValueError( - f"{icons.red_dot} The '{m_name}' measure does not exist in the '{self._dataset}' semantic model within the '{self._workspace}' workspace." + f"{icons.red_dot} The '{m_name}' measure does not exist in the '{self._dataset_name}' semantic model within the '{self._workspace_name}' workspace." ) new_item = { @@ -4606,9 +4618,9 @@ def close(self): if len(self._tables_added) > 0: refresh_semantic_model( - dataset=self._dataset, + dataset=self._dataset_id, tables=self._tables_added, - workspace=self._workspace, + workspace=self._workspace_id, ) self.model = None @@ -4618,15 +4630,15 @@ def close(self): @log @contextmanager def connect_semantic_model( - dataset: str, readonly: bool = True, workspace: Optional[str] = None + dataset: str | UUID, readonly: bool = True, workspace: Optional[str] = None ) -> Iterator[TOMWrapper]: """ Connects to the Tabular Object Model (TOM) within a semantic model. Parameters ---------- - dataset : str - Name of the semantic model. + dataset : str | UUID + Name or ID of the semantic model. readonly: bool, default=True Whether the connection is read-only or read/write. Setting this to False enables read/write which saves the changes made back to the server. 
workspace : str, default=None @@ -4643,10 +4655,6 @@ def connect_semantic_model( # initialize .NET to make sure System and Microsoft.AnalysisServices.Tabular is defined sempy.fabric._client._utils._init_analysis_services() - if workspace is None: - workspace_id = fabric.get_workspace_id() - workspace = fabric.resolve_workspace_name(workspace_id) - tw = TOMWrapper(dataset=dataset, workspace=workspace, readonly=readonly) try: yield tw diff --git a/tests/test_tom.py b/tests/test_tom.py index c96fff8a..aba5ce3f 100644 --- a/tests/test_tom.py +++ b/tests/test_tom.py @@ -3,29 +3,35 @@ from sempy_labs.tom import connect_semantic_model -@patch("sempy.fabric.resolve_workspace_name") +@patch("sempy.fabric.resolve_item_id") +@patch("sempy.fabric.resolve_workspace_id") +@patch("sempy_labs._helper_functions.resolve_dataset_name_and_id") +@patch("sempy_labs._helper_functions.resolve_workspace_name_and_id") @patch("sempy.fabric.create_tom_server") -def test_tom_wrapper(create_tom_server, resolve_workspace_name): +def test_tom_wrapper(create_tom_server, resolve_workspace_name_and_id, resolve_dataset_name_and_id, resolve_workspace_id, resolve_item_id): sempy.fabric._client._utils._init_analysis_services() import Microsoft.AnalysisServices.Tabular as TOM + resolve_workspace_name_and_id.return_value = ("my_workspace", "my_workspace_id") + resolve_dataset_name_and_id.return_value = ("my_dataset", "my_dataset_id") + resolve_workspace_id.return_value = "my_workspace_id" + resolve_item_id.return_value = "my_dataset_id" + # create dummy server, database and model tom_server = TOM.Server() db = TOM.Database() db.Name = "my_dataset" - db.ID = "my_dataset" + db.ID = "my_dataset_id" db.Model = TOM.Model() tom_server.Databases.Add(db) create_tom_server.return_value = tom_server - resolve_workspace_name.return_value = "my_workspace" - # invoke the wrapper - with connect_semantic_model("my_dataset") as tom: + with connect_semantic_model(dataset="my_dataset_id", workspace="my_workspace") as tom: tom.add_table("my_table") # validate the result - assert tom_server.Databases["my_dataset"].Model.Tables["my_table"] is not None + assert tom_server.Databases["my_dataset_id"].Model.Tables["my_table"] is not None
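
Reviewer note (not part of the patch): the new resolve_dataset_name_and_id helper in _helper_functions.py branches on _is_valid_uuid, which already exists in that module and is therefore not shown in this diff. For readers following the control flow, a validator of that kind is usually just a try/except around uuid.UUID; the sketch below is an assumption about its shape, not the repository's actual implementation.

from uuid import UUID

def _is_valid_uuid(value) -> bool:
    # Treat anything that parses as a UUID as an ID; everything else is a name.
    try:
        UUID(str(value))
        return True
    except ValueError:
        return False

With a check like this in place, resolve_dataset_name_and_id can return a (name, id) pair regardless of which form the caller supplied, which is why the functions touched by this diff can switch their internal calls over to dataset_id/workspace_id while still printing the friendly names.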
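Also not part of the patch: a minimal usage sketch of the behaviour this diff enables, namely passing either a semantic model name or its UUID to the updated functions. It assumes a Fabric notebook with sempy-labs installed; the workspace name and UUID below are placeholders, and the imports use the module paths visible in this diff rather than whatever public re-exports the package offers.

from sempy_labs._list_functions import list_tables
from sempy_labs._refresh_semantic_model import refresh_semantic_model
from sempy_labs.tom import connect_semantic_model

dataset = "5b218778-e7a5-4d73-8187-f10824047715"  # placeholder model UUID; the model name works too
workspace = "My Workspace"                        # placeholder workspace name

# The UUID is resolved back to the display name internally (resolve_dataset_name_and_id),
# so progress and error messages still refer to the model by name.
print(list_tables(dataset=dataset, workspace=workspace, extended=False))

with connect_semantic_model(dataset=dataset, readonly=True, workspace=workspace) as tom:
    for table in tom.model.Tables:
        print(table.Name)

refresh_semantic_model(dataset=dataset, workspace=workspace, refresh_type="full")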