From dd04c16443096e20c09bb54b4777362e1fed6710 Mon Sep 17 00:00:00 2001
From: Fabio Zadrozny
Date: Wed, 11 Dec 2024 09:40:39 -0300
Subject: [PATCH] wip

---
 sema4ai/codegen/commands.py                   |   7 +
 sema4ai/codegen/views.py                      |   5 +
 sema4ai/package.json                          |  12 +
 sema4ai/src/sema4ai_code/commands.py          |   1 +
 .../data/data_server_connection.py            |   6 +
 .../sema4ai_code/data/data_source_helper.py   | 144 ++++
 .../sema4ai_code/robo/collect_actions_ast.py  |   6 +-
 .../sema4ai_code/robocorp_language_server.py  | 450 ++++++++++++------
 .../robo/test_list_actions.py                 |  23 +-
 ...nd_datasources_mutiple_data_sources_3_.yml |  20 +
 .../test_compute_data_sources_state.py        | 225 ++++++---
 .../missing_data_source_all.yml               |  12 +-
 .../missing_data_source_none.yml              |   6 +-
 .../missing_data_source_prediction.yml        |  12 +-
 .../missing_data_source_sqlite.yml            |  12 +-
 .../package/data_sources.py                   |   6 +-
 sema4ai/vscode-client/src/extension.ts        |   3 +
 sema4ai/vscode-client/src/protocols.ts        |   4 +
 .../src/robo/dataSourceHandling.ts            |  40 ++
 sema4ai/vscode-client/src/robocorpCommands.ts |   3 +-
 20 files changed, 759 insertions(+), 238 deletions(-)
 create mode 100644 sema4ai/src/sema4ai_code/data/data_source_helper.py
 create mode 100644 sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml
 create mode 100644 sema4ai/vscode-client/src/robo/dataSourceHandling.ts

diff --git a/sema4ai/codegen/commands.py b/sema4ai/codegen/commands.py
index b0534cdc..b502463e 100644
--- a/sema4ai/codegen/commands.py
+++ b/sema4ai/codegen/commands.py
@@ -1026,6 +1026,13 @@ def __init__(
         add_to_package_json=True,
         icon="$(trash)",
     ),
+    Command(
+        "sema4ai.setupDataSource",
+        "Setup Data Source",
+        server_handled=False,
+        hide_from_command_palette=False,
+        icon="$(diff-added)",
+    ),
 ]
 
 
diff --git a/sema4ai/codegen/views.py b/sema4ai/codegen/views.py
index e5970c90..f243e6cb 100644
--- a/sema4ai/codegen/views.py
+++ b/sema4ai/codegen/views.py
@@ -160,6 +160,11 @@ def __init__(
         MenuGroup.NAVIGATION,
         when="viewItem == robotItem",
     ),
+    Menu(
+        "sema4ai.setupDataSource",
+        MenuGroup.INLINE,
+        when="viewItem == datasourceItem",
+    ),
     Menu(
         "sema4ai.openRobotTreeSelection",
         MenuGroup.NAVIGATION,
diff --git a/sema4ai/package.json b/sema4ai/package.json
index 50518df4..9e69eb4e 100644
--- a/sema4ai/package.json
+++ b/sema4ai/package.json
@@ -175,6 +175,7 @@
         "onCommand:sema4ai.runActionPackageDevTask",
         "onCommand:sema4ai.getActionsMetadata",
         "onCommand:sema4ai.dropDataSource",
+        "onCommand:sema4ai.setupDataSource",
         "onDebugInitialConfigurations",
         "onDebugResolve:sema4ai",
         "onView:sema4ai-task-packages-tree",
@@ -1013,6 +1014,12 @@
                 "title": "Drop Data Sources",
                 "category": "Sema4.ai",
                 "icon": "$(trash)"
+            },
+            {
+                "command": "sema4ai.setupDataSource",
+                "title": "Setup Data Source",
+                "category": "Sema4.ai",
+                "icon": "$(diff-added)"
             }
         ],
         "menus": {
@@ -1144,6 +1151,11 @@
                     "when": "view == sema4ai-task-packages-tree && viewItem == robotItem",
                     "group": "navigation"
                 },
+                {
+                    "command": "sema4ai.setupDataSource",
+                    "when": "view == sema4ai-task-packages-tree && viewItem == datasourceItem",
+                    "group": "inline"
+                },
                 {
                     "command": "sema4ai.openRobotTreeSelection",
                     "when": "view == sema4ai-task-packages-tree && viewItem == robotItem",
diff --git a/sema4ai/src/sema4ai_code/commands.py b/sema4ai/src/sema4ai_code/commands.py
index f5cdc152..2fd8eadc 100644
--- a/sema4ai/src/sema4ai_code/commands.py
+++ b/sema4ai/src/sema4ai_code/commands.py
@@ -148,6 +148,7 @@
 SEMA4AI_RUN_ACTION_PACKAGE_DEV_TASK = "sema4ai.runActionPackageDevTask"  # Run dev-task (from Action Package)
 SEMA4AI_GET_ACTIONS_METADATA = "sema4ai.getActionsMetadata"  # Get Actions Metadata
 SEMA4AI_DROP_DATA_SOURCE = "sema4ai.dropDataSource"  # Drop Data Sources
+SEMA4AI_SETUP_DATA_SOURCE = "sema4ai.setupDataSource"  # Setup Data Source
 
 ALL_SERVER_COMMANDS = [
     SEMA4AI_GET_PLUGINS_DIR,
diff --git a/sema4ai/src/sema4ai_code/data/data_server_connection.py b/sema4ai/src/sema4ai_code/data/data_server_connection.py
index 748e027f..190c10b2 100644
--- a/sema4ai/src/sema4ai_code/data/data_server_connection.py
+++ b/sema4ai/src/sema4ai_code/data/data_server_connection.py
@@ -3,6 +3,8 @@
 from pathlib import Path
 from typing import Any, Optional
 
+from sema4ai_ls_core.protocols import DatasourceInfoTypedDict
+
 if typing.TYPE_CHECKING:
     from .result_set import ResultSet
 
@@ -211,3 +213,7 @@ def run_sql(
 
     def upload_file(self, file_path: Path, table_name: str) -> None:
         self._http_connection.upload_file(file_path, table_name)
+
+    def get_data_sources(self, where: str) -> list[dict[str, Any]]:
+        result_set = self.query("", f"SHOW DATABASES {where}")
+        return list(result_set.iter_as_dicts())
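Illustrative sketch (not part of the patch): the new get_data_sources helper just parameterizes the WHERE clause of a SHOW DATABASES query and materializes the result set. Roughly how the language server uses it further down in this patch; the URL and credentials are made-up placeholders for a local data server:

    from sema4ai_code.data.data_server_connection import DataServerConnection

    # Placeholder connection details for a locally running data server.
    connection = DataServerConnection(
        http_url="http://localhost:47334",
        http_user="user",
        http_password="password",
    )

    # Project databases (e.g. `models`) vs. everything else (files, sqlite, ...).
    projects = connection.get_data_sources("WHERE type = 'project'")
    others = connection.get_data_sources("WHERE type != 'project'")
    print(sorted(db["database"].lower() for db in projects + others))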
diff --git a/sema4ai/src/sema4ai_code/data/data_source_helper.py b/sema4ai/src/sema4ai_code/data/data_source_helper.py
new file mode 100644
index 00000000..3566a674
--- /dev/null
+++ b/sema4ai/src/sema4ai_code/data/data_source_helper.py
@@ -0,0 +1,144 @@
+import typing
+from pathlib import Path
+
+from sema4ai_ls_core.protocols import DatasourceInfoTypedDict
+
+if typing.TYPE_CHECKING:
+    from sema4ai_code.data.data_server_connection import DataServerConnection
+
+
+class DataSourceHelper:
+    def __init__(
+        self,
+        root_path: Path,
+        datasource: "DatasourceInfoTypedDict",
+        connection: "DataServerConnection",
+    ):
+        # model datasource: a model created in a project
+        self._is_model_datasource = False
+
+        # table datasource: it's either a file or a custom engine that creates a table
+        self._is_table_datasource = False
+        self._root_path = root_path
+        self._custom_sql: tuple[str, ...] | None = None
+
+        self.datasource = datasource
+        self.connection = connection
+
+        # Do this last, as it may update fields (such as custom_sql,
+        # is_model_datasource, is_table_datasource, etc.).
+        error = self._compute_validation_error()
+        self._validation_errors: tuple[str, ...] = (error,) if error else ()
+
+    @property
+    def is_model_datasource(self) -> bool:
+        return self._is_model_datasource
+
+    @property
+    def is_table_datasource(self) -> bool:
+        return self._is_table_datasource
+
+    @property
+    def custom_sql(self) -> tuple[str, ...] | None:
+        return self._custom_sql
+
+    @property
+    def root_path(self) -> Path:
+        return self._root_path
+
+    def get_validation_errors(self) -> tuple[str, ...]:
+        return self._validation_errors
+
+    def _compute_validation_error(self) -> str | None:
+        datasource = self.datasource
+        datasource_name = datasource.get("name")
+
+        if not datasource_name:
+            return "It was not possible to statically discover the name of a datasource. Please specify the name of the datasource directly in the datasource definition."
+
+        datasource_engine = datasource.get("engine")
+        if not datasource_engine:
+            return f"It was not possible to statically discover the engine of a datasource ({datasource_name}). Please specify the engine of the datasource directly in the datasource definition."
+
+        created_table = datasource.get("created_table")
+        model_name = datasource.get("model_name")
+
+        if created_table and model_name:
+            return f"DataSource: {datasource_name} - The datasource cannot specify both the created_table and model_name fields."
+
+        if datasource_engine == "custom":
+            # Custom engine must have sql
+            error = self._update_custom_sql(datasource)
+            if error:
+                return error
+
+            if created_table:
+                self._is_table_datasource = True
+            elif model_name:
+                self._is_model_datasource = True
+            return None
+
+        if datasource_engine == "files":
+            if not created_table:
+                return f"DataSource: {datasource_name} - The files engine requires the created_table field to be set."
+
+            relative_path = datasource.get("file")
+            if not relative_path:
+                return f"DataSource: {datasource_name} - The files engine requires the file field to be set."
+
+            full_path = Path(self.root_path) / relative_path
+            if not full_path.exists():
+                return f"DataSource: {datasource_name} - The files engine requires the file field to be set to a valid file path. File does not exist: {full_path}"
+
+            self._is_table_datasource = True
+            return None
+        else:
+            if created_table:
+                return f"DataSource: {datasource_name} - The engine: {datasource_engine} does not support the created_table field."
+
+        if datasource_engine.startswith("prediction:"):
+            error = self._update_custom_sql(datasource)
+            if error:
+                return error
+
+            if not model_name:
+                return f"DataSource: {datasource_name} - The prediction engine requires the model_name field to be set."
+            self._is_model_datasource = True
+            return None
+        else:
+            if model_name:
+                return f"DataSource: {datasource_name} - The engine: {datasource_engine} does not support the model_name field."
+
+        return None
+
+    def _update_custom_sql(self, datasource: "DatasourceInfoTypedDict") -> None | str:
+        datasource_name = datasource.get("name")
+        setup_sql = datasource.get("setup_sql")
+        setup_sql_files = datasource.get("setup_sql_files")
+        if not setup_sql and not setup_sql_files:
+            return f"DataSource: {datasource_name} - The custom engine requires the setup_sql or setup_sql_files field to be set."
+
+        if setup_sql_files and setup_sql:
+            return f"DataSource: {datasource_name} - The custom engine cannot specify both the setup_sql and setup_sql_files fields."
+
+        if setup_sql:
+            if isinstance(setup_sql, str):
+                setup_sql = [setup_sql]
+            if not isinstance(setup_sql, list):
+                return f"DataSource: {datasource_name} - The setup_sql field must be a string or a list of strings."
+            self._custom_sql = tuple(setup_sql)
+        else:
+            if isinstance(setup_sql_files, str):
+                setup_sql_files = [setup_sql_files]
+            if not isinstance(setup_sql_files, list):
+                return f"DataSource: {datasource_name} - The setup_sql_files field must be a string or a list of strings."
+
+            # read the files
+            sqls = []
+            for file in setup_sql_files:
+                full_path = Path(self.root_path) / file
+                if not full_path.exists():
+                    return f"DataSource: {datasource_name} - The setup_sql_files field must be set to a list of valid file paths. File does not exist: {full_path}"
+                txt = full_path.read_text()
+                sqls.append(txt)
+            self._custom_sql = tuple(sqls)
+        return None
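Illustrative sketch (not part of the patch): DataSourceHelper validates eagerly in __init__, so callers construct it and then only consult get_validation_errors() plus the is_table_datasource/is_model_datasource flags, as the server code later in this patch does. The datasource dict and paths below are invented; with a nonexistent file the sketch simply prints the validation error:

    from pathlib import Path

    from sema4ai_code.data.data_server_connection import DataServerConnection
    from sema4ai_code.data.data_source_helper import DataSourceHelper

    # Placeholder connection details, as in the previous sketch.
    connection = DataServerConnection(
        http_url="http://localhost:47334",
        http_user="user",
        http_password="password",
    )

    # Invented entry mimicking what collect_actions_ast extracts from a package.
    datasource = {
        "name": "files",
        "engine": "files",
        "created_table": "customers",
        "file": "files/customers.csv",
    }

    helper = DataSourceHelper(Path("/path/to/action-package"), datasource, connection)
    errors = helper.get_validation_errors()
    if errors:
        print("\n".join(errors))
    elif helper.is_table_datasource:
        # For the files engine, setup means uploading the file to files.<created_table>.
        connection.upload_file(
            Path(helper.root_path) / datasource["file"], datasource["created_table"]
        )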
diff --git a/sema4ai/src/sema4ai_code/robo/collect_actions_ast.py b/sema4ai/src/sema4ai_code/robo/collect_actions_ast.py
index 79bda893..127f6818 100644
--- a/sema4ai/src/sema4ai_code/robo/collect_actions_ast.py
+++ b/sema4ai/src/sema4ai_code/robo/collect_actions_ast.py
@@ -88,6 +88,8 @@ def _resolve_value(node: ast_module.AST, variable_values: dict[str, str]) -> str
         return node.value
     elif isinstance(node, ast_module.Name) and node.id in variable_values:
         return variable_values[node.id]
+    elif isinstance(node, ast_module.List):
+        return [_resolve_value(item, variable_values) for item in node.elts]
     return None
 
 
@@ -148,8 +150,8 @@ def _extract_datasource_info(
             ],
             keyword.arg,
         )
-        name = _resolve_value(keyword.value, variable_values)
-        info[key] = name
+        value = _resolve_value(keyword.value, variable_values)
+        info[key] = value
 
     if info.get("engine") == "files":
         info["name"] = "files"
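Illustrative sketch (not part of the patch): the new ast_module.List branch is what lets a statically declared setup_sql=[...] list survive extraction (exercised by the data_sources_3 test scenario later in this patch). The same idea, self-contained, using the stdlib ast module directly:

    import ast

    def resolve_value(node, variable_values):
        # Mirrors the patched _resolve_value: constants, known names, and now lists.
        if isinstance(node, ast.Constant):
            return node.value
        if isinstance(node, ast.Name) and node.id in variable_values:
            return variable_values[node.id]
        if isinstance(node, ast.List):
            return [resolve_value(item, variable_values) for item in node.elts]
        return None

    tree = ast.parse("setup_sql = ['CREATE MODEL m;', 'CREATE VIEW v;']")
    assign = tree.body[0]
    assert isinstance(assign, ast.Assign)
    print(resolve_value(assign.value, {}))  # ['CREATE MODEL m;', 'CREATE VIEW v;']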
self._endpoint, "Setting up data sources", self._dir_cache + ): + root_path = Path(uris.to_fs_path(action_package_yaml_directory_uri)) + if not root_path.exists(): + return ( + ActionResult[DataSourceSetupResponse] + .make_failure( + f"Unable to setup data source. Root path does not exist: {root_path}" + ) + .as_dict() + ) + + if not isinstance(datasource, list): + datasources = [datasource] + else: + datasources = datasource + + connection = self._get_connection(data_server_info) + messages = [] + for datasource in datasources: + monitor.check_cancelled() + datasource_helper = DataSourceHelper( + Path(uris.to_fs_path(action_package_yaml_directory_uri)), + datasource, + connection, + ) + validation_errors = datasource_helper.get_validation_errors() + if validation_errors: + return ( + ActionResult[DataSourceSetupResponse] + .make_failure( + f"Unable to setup data source. {validation_errors[0]}" + ) + .as_dict() + ) + + if datasource["engine"] == "files": + created_table = datasource["created_table"] + relative_path = datasource["file"] + + # These asserts should've been caught by the validation. + assert ( + datasource_helper.is_table_datasource + ), "Expected a table datasource for the files engine." + assert ( + created_table + ), "Expected a created_table for the files engine." + assert relative_path, "Expected a file for the files engine." + + full_path = Path(root_path) / relative_path + if not full_path.exists(): + return ( + ActionResult[DataSourceSetupResponse] + .make_failure( + f"Unable to setup files engine data source. File does not exist: {full_path}" + ) + .as_dict() + ) + try: + connection.upload_file(full_path, created_table) + except Exception as e: + return ( + ActionResult[DataSourceSetupResponse] + .make_failure( + f"Unable to upload file {full_path} to table files.{created_table}. Error: {e}" + ) + .as_dict() + ) + + messages.append( + f"Uploaded file {full_path} to table {created_table}" + ) + continue + + if datasource["engine"] == "custom": + # These asserts should've been caught by the validation. + assert ( + datasource_helper.custom_sql + ), "Expected the sql to be defined for the custom engine." + + for sql in datasource_helper.custom_sql: + try: + connection.run_sql(sql) + except Exception as e: + return ( + ActionResult[DataSourceSetupResponse] + .make_failure( + f"Unable to setup custom engine data source. Error executing SQL: {sql}. Error: {e}" + ) + .as_dict() + ) + + messages.append( + f"custom engine setup: executed {len(datasource_helper.custom_sql)} SQL statements." + ) + continue + + if datasource["engine"].startswith("prediction:"): + model_name = datasource["model_name"] + assert ( + model_name + ), "Expected a model_name for the prediction engine." + assert ( + datasource_helper.custom_sql + ), "Expected the setup sql to be defined for the prediction engine." + + for sql in datasource_helper.custom_sql: + try: + connection.run_sql(sql) + except Exception as e: + return ( + ActionResult[DataSourceSetupResponse] + .make_failure( + f"Unable to setup prediction engine data source. Error executing SQL: {sql}. Error: {e}" + ) + .as_dict() + ) + + messages.append( + f"prediction engine setup: executed {len(datasource_helper.custom_sql)} SQL statements." + ) + continue + + messages.append( + f"Unable to setup external data source automatically (engine: {datasource['engine']}). Please use the `Sema4.ai: Add New Data Source` command to setup this data source." 
+ ) + + return ( + ActionResult[DataSourceSetupResponse].make_success(messages).as_dict() + ) + def m_compute_data_source_state( self, action_package_yaml_directory_uri: str, @@ -2462,169 +2630,147 @@ def _impl_compute_data_source_state_impl( monitor: IMonitor, ) -> ActionResultDict[DataSourceStateDict]: from sema4ai_ls_core.lsp import DiagnosticSeverity, DiagnosticsTypedDict + from sema4ai_ls_core.progress_report import progress_context - from sema4ai_code.data.data_server_connection import DataServerConnection + from sema4ai_code.data.data_source_helper import DataSourceHelper - actions_and_datasources_result: ActionResultDict[ - "list[ActionInfoTypedDict | DatasourceInfoTypedDict]" - ] = self._local_list_actions_internal_impl( - action_package_uri=action_package_yaml_directory_uri, - collect_datasources=True, - ) - if not actions_and_datasources_result["success"]: - # Ok to cast as it's an error. - return typing.cast( - ActionResultDict[DataSourceStateDict], - actions_and_datasources_result, + with progress_context( + self._endpoint, "Computing data sources state", self._dir_cache + ) as progress_reporter: + progress_reporter.set_additional_info("Listing actions") + actions_and_datasources_result: ActionResultDict[ + "list[ActionInfoTypedDict | DatasourceInfoTypedDict]" + ] = self._local_list_actions_internal_impl( + action_package_uri=action_package_yaml_directory_uri, + collect_datasources=True, ) + if not actions_and_datasources_result["success"]: + # Ok to cast as it's an error. + return typing.cast( + ActionResultDict[DataSourceStateDict], + actions_and_datasources_result, + ) - http = data_server_info["api"]["http"] - auth = data_server_info["auth"] - user = auth["username"] - password = auth["password"] - - connection = DataServerConnection( - http_url=f"http://{http['host']}:{http['port']}", - http_user=user, - http_password=password, - ) + monitor.check_cancelled() + progress_reporter.set_additional_info("Getting data sources") + connection = self._get_connection(data_server_info) + projects_as_dicts = connection.get_data_sources("WHERE type = 'project'") + not_projects_as_dicts = connection.get_data_sources( + "WHERE type != 'project'" + ) - result_set_projects = connection.query( - "", "SHOW DATABASES WHERE type = 'project'" - ) - result_set_not_projects = connection.query( - "", "SHOW DATABASES WHERE type != 'project'" - ) - projects_as_dicts = list(result_set_projects.iter_as_dicts()) - not_projects_as_dicts = list(result_set_not_projects.iter_as_dicts()) + all_databases_as_dicts = projects_as_dicts + not_projects_as_dicts - all_databases_as_dicts = projects_as_dicts + not_projects_as_dicts + try: + data_source_names_in_data_server = set( + x["database"].lower() for x in all_databases_as_dicts + ) + except Exception: + log.exception( + "Error getting data source names in data server. Query result: %s", + all_databases_as_dicts, + ) + return ( + ActionResult[DataSourceStateDict] + .make_failure("Error getting data source names in data server") + .as_dict() + ) - try: - data_source_names_in_data_server = set( - x["database"].lower() for x in all_databases_as_dicts - ) - except Exception: - log.exception( - "Error getting data source names in data server. 
Query result: %s", - all_databases_as_dicts, - ) - return ( - ActionResult[DataSourceStateDict] - .make_failure("Error getting data source names in data server") - .as_dict() + projects_data_source_names_in_data_server = set( + x["database"].lower() for x in projects_as_dicts ) - projects_data_source_names_in_data_server = set( - x["database"].lower() for x in projects_as_dicts - ) + monitor.check_cancelled() + progress_reporter.set_additional_info("Getting models") - data_source_to_models = {} - for data_source in projects_data_source_names_in_data_server: - result_set_models = connection.query(data_source, "SELECT * FROM models") - if result_set_models: - data_source_to_models[data_source] = [ - x["name"] for x in result_set_models.iter_as_dicts() - ] - - files_table_names = set( - x["tables_in_files"] - for x in connection.query("files", "SHOW TABLES").iter_as_dicts() - ) + data_source_to_models = {} + for data_source in projects_data_source_names_in_data_server: + result_set_models = connection.query( + data_source, "SELECT * FROM models" + ) + if result_set_models: + data_source_to_models[data_source] = [ + x["name"] for x in result_set_models.iter_as_dicts() + ] - assert actions_and_datasources_result["result"] is not None - actions_and_datasources: "list[ActionInfoTypedDict | DatasourceInfoTypedDict]" = actions_and_datasources_result[ - "result" - ] - required_data_sources: list["DatasourceInfoTypedDict"] = [ - typing.cast("DatasourceInfoTypedDict", d) - for d in actions_and_datasources - if d["kind"] == "datasource" - ] + monitor.check_cancelled() + progress_reporter.set_additional_info("Getting files") - unconfigured_data_sources: list["DatasourceInfoTypedDict"] = [] - uri_to_error_messages: dict[str, list[DiagnosticsTypedDict]] = {} - ret: DataSourceStateDict = { - "unconfigured_data_sources": unconfigured_data_sources, - "uri_to_error_messages": uri_to_error_messages, - "required_data_sources": required_data_sources, - "data_sources_in_data_server": sorted(data_source_names_in_data_server), - } + files_table_names = set( + x["tables_in_files"] + for x in connection.query("files", "SHOW TABLES").iter_as_dicts() + ) - if required_data_sources: - datasource: "DatasourceInfoTypedDict" - for datasource in required_data_sources: - uri = datasource.get("uri", "") - datasource_name = datasource.get("name") - - if not datasource_name: - uri_to_error_messages.setdefault(uri, []).append( - { - "range": datasource["range"], - "severity": DiagnosticSeverity.Error, - "message": "It was not possible to statically discover the name of a datasource. Please specify the name of the datasource directly in the datasource definition.", - } - ) - continue + monitor.check_cancelled() + progress_reporter.set_additional_info("Computing data sources state") - datasource_engine = datasource.get("engine") - if not datasource_engine: - uri_to_error_messages.setdefault(uri, []).append( - { - "range": datasource["range"], - "severity": DiagnosticSeverity.Error, - "message": f"It was not possible to statically discover the engine of a datasource ({datasource_name}). 
Please specify the engine of the datasource directly in the datasource definition.", - } - ) - continue + assert actions_and_datasources_result["result"] is not None + actions_and_datasources: "list[ActionInfoTypedDict | DatasourceInfoTypedDict]" = actions_and_datasources_result[ + "result" + ] + required_data_sources: list["DatasourceInfoTypedDict"] = [ + typing.cast("DatasourceInfoTypedDict", d) + for d in actions_and_datasources + if d["kind"] == "datasource" + ] - if datasource_engine == "files" or ( - datasource_name == "custom" and datasource.get("created_table") - ): - created_table = datasource.get("created_table") - if not created_table: - uri_to_error_messages.setdefault(uri, []).append( - { - "range": datasource["range"], - "severity": DiagnosticSeverity.Error, - "message": "The files engine requires the created_table field to be set.", - } - ) - continue + unconfigured_data_sources: list["DatasourceInfoTypedDict"] = [] + uri_to_error_messages: dict[str, list[DiagnosticsTypedDict]] = {} + ret: DataSourceStateDict = { + "unconfigured_data_sources": unconfigured_data_sources, + "uri_to_error_messages": uri_to_error_messages, + "required_data_sources": required_data_sources, + "data_sources_in_data_server": sorted(data_source_names_in_data_server), + } - if datasource_engine == "files": - if created_table not in files_table_names: - unconfigured_data_sources.append(datasource) - else: - # Custom datasource with created_table. - custom_table_names = set( - x["tables_in_files"] - for x in connection.query( - "files", "SHOW TABLES" - ).iter_as_dicts() - ) - if created_table not in custom_table_names: + if required_data_sources: + root_path = Path(uris.to_fs_path(action_package_yaml_directory_uri)) + datasource: "DatasourceInfoTypedDict" + for datasource in required_data_sources: + uri = datasource.get("uri", "") + datasource_helper = DataSourceHelper( + root_path, datasource, connection + ) + validation_errors = datasource_helper.get_validation_errors() + if validation_errors: + for validation_error in validation_errors: + uri_to_error_messages.setdefault(uri, []).append( + { + "range": datasource["range"], + "severity": DiagnosticSeverity.Error, + "message": validation_error, + } + ) + continue # this one is invalid, so, we can't go forward. + + datasource_name = datasource["name"] + datasource_engine = datasource["engine"] + + if datasource_helper.is_table_datasource: + created_table = datasource["created_table"] + if datasource_engine == "files": + if created_table not in files_table_names: + unconfigured_data_sources.append(datasource) + else: + # Custom datasource with created_table. + tables_result_set = connection.query("files", "SHOW TABLES") + custom_table_names = set( + x["tables_in_files"] + for x in tables_result_set.iter_as_dicts() + ) + if created_table not in custom_table_names: + unconfigured_data_sources.append(datasource) + continue # Ok, handled use case. 
+ + if datasource_helper.is_model_datasource: + model_name = datasource["model_name"] + if model_name not in data_source_to_models.get( + datasource_name, [] + ): unconfigured_data_sources.append(datasource) - continue - - if datasource_engine.startswith("prediction:") or ( - datasource_name == "custom" and datasource.get("model_name") - ): - model_name = datasource.get("model_name") - if not model_name: - uri_to_error_messages.setdefault(uri, []).append( - { - "range": datasource["range"], - "severity": DiagnosticSeverity.Error, - "message": "The prediction engine requires the model_name field to be set.", - } - ) continue - if model_name not in data_source_to_models.get(datasource_name, []): - unconfigured_data_sources.append(datasource) - continue - if datasource_name.lower() not in data_source_names_in_data_server: - unconfigured_data_sources.append(datasource) + if datasource_name.lower() not in data_source_names_in_data_server: + unconfigured_data_sources.append(datasource) - return ActionResult[DataSourceStateDict].make_success(ret).as_dict() + return ActionResult[DataSourceStateDict].make_success(ret).as_dict() diff --git a/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py index 3faa5b9b..d2e17aa1 100644 --- a/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py +++ b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py @@ -318,9 +318,30 @@ def data_sources_2() -> str: """ +def data_sources_3() -> str: + return """ +import typing +from sema4ai.data import DataSource, DataSourceSpec + +ChurnPredictionDataSource = typing.Annotated[ + DataSource, + DataSourceSpec( + model_name="customer_churn_predictor", + engine="prediction:lightwood", + description="Datasource which provides along with a table named `customer_churn_predictor`.", + setup_sql=["CREATE MODEL IF NOT EXISTS customer_churn_predictor FROM files (SELECT * FROM churn) PREDICT Churn;"], + ), +] +""" + + @pytest.mark.parametrize( "scenario", - [data_sources, data_sources_2], + [ + data_sources, + data_sources_2, + data_sources_3, + ], ) def test_list_actions_and_datasources_mutiple(data_regression, scenario, tmpdir): action_package_path = Path(tmpdir) diff --git a/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml new file mode 100644 index 00000000..a3e7250b --- /dev/null +++ b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml @@ -0,0 +1,20 @@ +- created_table: null + description: Datasource which provides along with a table named `customer_churn_predictor`. 
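Illustrative sketch (not part of the patch): computeDataSourceState now reports everything through a single DataSourceStateDict payload. The values below are invented, but the keys mirror the ret dict built above:

    from typing import Any

    # Invented example payload; keys match the DataSourceStateDict built above.
    state: dict[str, Any] = {
        "required_data_sources": [
            {"name": "files", "engine": "files", "created_table": "customers"},
            {
                "name": "models",
                "engine": "prediction:lightwood",
                "model_name": "customer_churn_predictor",
            },
        ],
        # Subset of required_data_sources still missing in the data server.
        "unconfigured_data_sources": [
            {
                "name": "models",
                "engine": "prediction:lightwood",
                "model_name": "customer_churn_predictor",
            },
        ],
        "uri_to_error_messages": {},  # uri -> list of LSP-style diagnostics
        "data_sources_in_data_server": ["files", "models"],
    }

    # A client would typically offer `setupDataSource` for each unconfigured entry:
    for ds in state["unconfigured_data_sources"]:
        print(f"Needs setup: {ds['name']} ({ds['engine']})")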
diff --git a/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py
index 3faa5b9b..d2e17aa1 100644
--- a/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py
+++ b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions.py
@@ -318,9 +318,30 @@ def data_sources_2() -> str:
 """
 
 
+def data_sources_3() -> str:
+    return """
+import typing
+from sema4ai.data import DataSource, DataSourceSpec
+
+ChurnPredictionDataSource = typing.Annotated[
+    DataSource,
+    DataSourceSpec(
+        model_name="customer_churn_predictor",
+        engine="prediction:lightwood",
+        description="Datasource which provides a model along with a table named `customer_churn_predictor`.",
+        setup_sql=["CREATE MODEL IF NOT EXISTS customer_churn_predictor FROM files (SELECT * FROM churn) PREDICT Churn;"],
+    ),
+]
+"""
+
+
 @pytest.mark.parametrize(
     "scenario",
-    [data_sources, data_sources_2],
+    [
+        data_sources,
+        data_sources_2,
+        data_sources_3,
+    ],
 )
 def test_list_actions_and_datasources_mutiple(data_regression, scenario, tmpdir):
     action_package_path = Path(tmpdir)
diff --git a/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml
new file mode 100644
index 00000000..a3e7250b
--- /dev/null
+++ b/sema4ai/tests/sema4ai_code_tests/robo/test_list_actions/test_list_actions_and_datasources_mutiple_data_sources_3_.yml
@@ -0,0 +1,20 @@
+- created_table: null
+  description: Datasource which provides a model along with a table named `customer_churn_predictor`.
+  engine: prediction:lightwood
+  file: null
+  kind: datasource
+  model_name: customer_churn_predictor
+  name: models
+  python_variable_name: ChurnPredictionDataSource
+  range:
+    end:
+      character: 25
+      line: 5
+    start:
+      character: 0
+      line: 5
+  setup_sql:
+  - CREATE MODEL IF NOT EXISTS customer_churn_predictor FROM files (SELECT * FROM
+    churn) PREDICT Churn;
+  setup_sql_files: null
+  uri: data_sources.py
diff --git a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state.py b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state.py
index c611ec59..b339bc02 100644
--- a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state.py
+++ b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state.py
@@ -1,10 +1,11 @@
 import logging
+from typing import Any
 
 import pytest
+from sema4ai_code_tests.data_server_cli_wrapper import DataServerCliWrapper
 from sema4ai_ls_core.protocols import DataSourceStateDict
 
 from sema4ai_code.protocols import DataServerConfigTypedDict
-from sema4ai_code_tests.data_server_cli_wrapper import DataServerCliWrapper
 
 log = logging.getLogger(__name__)
 
@@ -12,6 +13,8 @@
 @pytest.fixture
 def cleanup_data_sources(data_server_cli: DataServerCliWrapper):
     assert data_server_cli.http_connection is not None
+    data_server_cli.http_connection.run_sql("""CREATE PROJECT IF NOT EXISTS models""")
+
     data_server_cli.http_connection.run_sql(
         "DROP TABLE IF EXISTS files.customers_in_test_compute_data_sources_state"
     )
@@ -54,7 +57,13 @@ def wait_for_models_to_be_ready(
             if not result:
                 continue  # Someone else removed it in the meanwhile?
 
-            row = next(result.iter_as_dicts())
+            found = list(result.iter_as_dicts())
+            if not found:
+                raise RuntimeError(
+                    f"Model {project}.{model} not found in the data server."
+                )
+
+            row = next(iter(found))
 
             status = row.get("status", "").lower()
             if status in ("generating", "training"):
@@ -79,6 +88,7 @@ def wait_for_models_to_be_ready(
 
 
 def _create_prediction_model(data_server_cli: DataServerCliWrapper):
+    assert data_server_cli.http_connection is not None
    data_server_cli.http_connection.run_sql("""CREATE PROJECT IF NOT EXISTS models""")
     data_server_cli.http_connection.run_sql(
         """CREATE MODEL models.predict_compute_data_sources_state
@@ -93,34 +103,9 @@ def _create_prediction_model(data_server_cli: DataServerCliWrapper):
     )
 
 
-def _collect_data_source_state(
-    language_server, data_server_info, root_path
-) -> DataSourceStateDict:
-    import json
-
-    from sema4ai_ls_core import uris
-
-    result = language_server.request(
-        {
-            "jsonrpc": "2.0",
-            "id": language_server.next_id(),
-            "method": "computeDataSourceState",
-            "params": {
-                "action_package_yaml_directory_uri": uris.from_fs_path(root_path),
-                "data_server_info": data_server_info,
-            },
-        }
-    )["result"]
-    assert result[
-        "success"
-    ], f"Expected success. Full result: {json.dumps(result, indent=2)}"
-
-    assert result["result"] is not None, "Expected result to be not None"
-    fixed_result: DataSourceStateDict = fix_data_sources_state_result(result["result"])
-    return fixed_result
-
-
-def _get_data_server_info(data_server_cli) -> DataServerConfigTypedDict:
+def create_setup_config(
+    data_server_cli: DataServerCliWrapper,
+) -> DataServerConfigTypedDict:
     http_port, mysql_port = data_server_cli.get_http_and_mysql_ports()
 
     data_server_info: DataServerConfigTypedDict = {
@@ -141,34 +126,169 @@
         "pid": -1,
         "pidFilePath": "",
     }
-
     return data_server_info
 
 
+@pytest.fixture
+def collect_data_source_state(
+    language_server_initialized, data_server_cli, ws_root_path
+) -> Any:
+    language_server = language_server_initialized
+
+    data_server_info: DataServerConfigTypedDict = create_setup_config(data_server_cli)
+
+    def func() -> DataSourceStateDict:
+        import json
+
+        from sema4ai_ls_core import uris
+
+        result = language_server.request(
+            {
+                "jsonrpc": "2.0",
+                "id": language_server.next_id(),
+                "method": "computeDataSourceState",
+                "params": {
+                    "action_package_yaml_directory_uri": uris.from_fs_path(
+                        ws_root_path
+                    ),
+                    "data_server_info": data_server_info,
+                },
+            }
+        )["result"]
+        assert result[
+            "success"
+        ], f"Expected success. Full result: {json.dumps(result, indent=2)}"
+        ret = result["result"]
+        fixed_result = fix_data_sources_state_result(ret)
+        return fixed_result
+
+    return func
+
+
 @pytest.mark.data_server
-def test_compute_data_sources_state(
+def test_setup_datasource(
     data_server_cli: DataServerCliWrapper,
-    language_server_initialized,
-    ws_root_path,
-    datadir,
     data_regression,
     cleanup_data_sources,
+    collect_data_source_state,
+    datadir,
+    ws_root_path,
+    language_server_initialized,
     tmpdir,
 ):
     import json
     import shutil
 
-    from sema4ai_code_tests.data_server_fixtures import create_another_sqlite_sample_db
+    from sema4ai_ls_core import uris
+    from sema4ai_ls_core.protocols import DatasourceInfoTypedDict
 
     shutil.copytree(datadir / "package", ws_root_path)
 
-    language_server = language_server_initialized
+    data_server_info: DataServerConfigTypedDict = create_setup_config(data_server_cli)
 
-    data_server_info = _get_data_server_info(data_server_cli)
+    fixed_result = collect_data_source_state()
+    data_regression.check(fixed_result, basename="missing_data_source_all")
 
-    fixed_result = _collect_data_source_state(
-        language_server, data_server_info, ws_root_path
+    # Now, let's configure the file data source.
+    unconfigured_data_sources = fixed_result["unconfigured_data_sources"]
+    assert len(unconfigured_data_sources) == 3
+    files_data_sources = [
+        data_source
+        for data_source in unconfigured_data_sources
+        if data_source["engine"] == "files"
+    ]
+
+    assert len(files_data_sources) == 1
+    assert files_data_sources[0]["file"] == "files/customers.csv"
+
+    def setup_data_source(datasource: DatasourceInfoTypedDict):
+        result = language_server_initialized.request(
+            {
+                "jsonrpc": "2.0",
+                "id": language_server_initialized.next_id(),
+                "method": "setupDataSource",
+                "params": {
+                    "action_package_yaml_directory_uri": uris.from_fs_path(
+                        ws_root_path
+                    ),
+                    "datasource": datasource,
+                    "data_server_info": data_server_info,
+                },
+            }
+        )
+        if "result" not in result:
+            raise RuntimeError(
+                f"Expected result to be in the response. Full response: {json.dumps(result, indent=2)}"
+            )
+        return result["result"]
+
+    result = setup_data_source(files_data_sources[0])
+    assert result[
+        "success"
+    ], f"Expected success. Full result: {json.dumps(result, indent=2)}"
+    assert "Uploaded file" in str(
+        result["result"]
+    ), f"Expected 'Uploaded file' in the result. Full result: {json.dumps(result, indent=2)}"
+
+    fixed_result = collect_data_source_state()
+    unconfigured_data_sources = fixed_result["unconfigured_data_sources"]
+    data_regression.check(fixed_result, basename="missing_data_source_sqlite")
+    assert len(unconfigured_data_sources) == 2
+
+    setup_sqlite_data_source(data_server_cli, tmpdir)
+
+    fixed_result = collect_data_source_state()
+    data_regression.check(fixed_result, basename="missing_data_source_prediction")
+    unconfigured_data_sources = fixed_result["unconfigured_data_sources"]
+    assert len(unconfigured_data_sources) == 1
+
+    assert data_server_cli.http_connection
+    prediction_data_sources = [
+        data_source
+        for data_source in unconfigured_data_sources
+        if data_source["engine"].startswith("prediction:")
+    ]
+    assert len(prediction_data_sources) == 1
+    result = setup_data_source(prediction_data_sources[0])
+    assert result[
+        "success"
+    ], f"Expected success. Full result: {json.dumps(result, indent=2)}"
+
+    wait_for_models_to_be_ready(
+        data_server_cli, {"models": ["predict_compute_data_sources_state"]}
     )
+
+    fixed_result = collect_data_source_state()
+    data_regression.check(fixed_result, basename="missing_data_source_none")
+
+
+@pytest.mark.data_server
+def setup_sqlite_data_source(data_server_cli, tmpdir):
+    import json
+
+    from sema4ai_code_tests.data_server_fixtures import create_another_sqlite_sample_db
+
+    params = json.dumps({"db_file": str(create_another_sqlite_sample_db(tmpdir))})
+    engine = "sqlite"
+    data_server_cli.http_connection.run_sql(
+        f"CREATE DATABASE `test_compute_data_sources_state` ENGINE = '{engine}' , PARAMETERS = {params}",
+    )
+
+
+def test_compute_data_sources_state(
+    data_server_cli: DataServerCliWrapper,
+    ws_root_path,
+    datadir,
+    data_regression,
+    cleanup_data_sources,
+    tmpdir,
+    collect_data_source_state,
+):
+    import shutil
+
+    shutil.copytree(datadir / "package", ws_root_path)
+
+    fixed_result = collect_data_source_state()
     data_regression.check(fixed_result, basename="missing_data_source_all")
 
     # Now, let's configure the file data source.
@@ -177,27 +297,17 @@ def test_compute_data_sources_state(
         datadir / "package" / "files" / "customers.csv",
         "customers_in_test_compute_data_sources_state",
     )
-    fixed_result = _collect_data_source_state(
-        language_server, data_server_info, ws_root_path
-    )
+    fixed_result = collect_data_source_state()
     data_regression.check(fixed_result, basename="missing_data_source_sqlite")
 
-    params = json.dumps({"db_file": str(create_another_sqlite_sample_db(tmpdir))})
-    engine = "sqlite"
-    data_server_cli.http_connection.run_sql(
-        f"CREATE DATABASE `test_compute_data_sources_state` ENGINE = '{engine}' , PARAMETERS = {params}",
-    )
+    setup_sqlite_data_source(data_server_cli, tmpdir)
 
-    fixed_result = _collect_data_source_state(
-        language_server, data_server_info, ws_root_path
-    )
+    fixed_result = collect_data_source_state()
     data_regression.check(fixed_result, basename="missing_data_source_prediction")
 
     _create_prediction_model(data_server_cli)
 
-    fixed_result = _collect_data_source_state(
-        language_server, data_server_info, ws_root_path
-    )
+    fixed_result = collect_data_source_state()
     data_regression.check(fixed_result, basename="missing_data_source_none")
 
 
@@ -234,6 +344,7 @@ def test_drop_data_sources(
     datadir,
     data_regression,
     cleanup_data_sources,
+    collect_data_source_state,
     tmpdir,
 ):
     import json
@@ -244,7 +355,7 @@ def test_drop_data_sources(
     shutil.copytree(datadir / "package", ws_root_path)
 
     language_server = language_server_initialized
-    data_server_info = _get_data_server_info(data_server_cli)
+    data_server_info = create_setup_config(data_server_cli)
 
     assert data_server_cli.http_connection is not None
     data_server_cli.http_connection.upload_file(
@@ -260,9 +371,7 @@ def test_drop_data_sources(
 
     _create_prediction_model(data_server_cli)
 
-    data_sources_state = _collect_data_source_state(
-        language_server, data_server_info, ws_root_path
-    )
+    data_sources_state = collect_data_source_state()
 
     for data_source in data_sources_state["required_data_sources"]:
         result = language_server.request(
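Illustrative sketch (not part of the patch): collect_data_source_state uses the fixture-returning-a-closure pattern, binding the language server and config once so tests can re-poll state repeatedly. The same pattern reduced to stand-in fixtures:

    import pytest

    @pytest.fixture
    def config() -> dict:
        return {"port": 47334}  # stand-in for the data server config

    @pytest.fixture
    def fetch_state(config):
        calls = []

        def func() -> dict:
            # Fixture arguments are bound once; tests call func() as often as needed.
            calls.append(1)
            return {"call_count": len(calls), "config": config}

        return func

    def test_state_changes(fetch_state):
        before = fetch_state()
        after = fetch_state()
        assert before != after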
diff --git a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_all.yml b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_all.yml
index f74918d3..b44aa26b 100644
--- a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_all.yml
+++ b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_all.yml
@@ -37,11 +37,11 @@ required_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
@@ -104,11 +104,11 @@ unconfigured_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
diff --git a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_none.yml b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_none.yml
index 879427df..956cedfd 100644
--- a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_none.yml
+++ b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_none.yml
@@ -37,11 +37,11 @@ required_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
diff --git a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_prediction.yml b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_prediction.yml
index 07c76d1e..efad67bb 100644
--- a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_prediction.yml
+++ b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_prediction.yml
@@ -37,11 +37,11 @@ required_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
@@ -86,11 +86,11 @@ unconfigured_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
diff --git a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_sqlite.yml b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_sqlite.yml
index 73b6e40a..3347c550 100644
--- a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_sqlite.yml
+++ b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/missing_data_source_sqlite.yml
@@ -37,11 +37,11 @@ required_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
@@ -86,11 +86,11 @@ unconfigured_data_sources:
 
     CREATE MODEL models.predict_compute_data_sources_state
 
-    (SELECT * FROM files.customers)
+    FROM files
 
-    PREDICT country
+    (SELECT * FROM customers_in_test_compute_data_sources_state)
 
-    ORDER BY first_name
+    PREDICT Index
 
     WINDOW 8
 
diff --git a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/package/data_sources.py b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/package/data_sources.py
index abc260e5..7911437f 100644
--- a/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/package/data_sources.py
+++ b/sema4ai/tests/sema4ai_code_tests/test_compute_data_sources_state/package/data_sources.py
@@ -22,9 +22,9 @@
         description="Predict something.",
         setup_sql="""
 CREATE MODEL models.predict_compute_data_sources_state
-(SELECT * FROM files.customers)
-PREDICT country
-ORDER BY first_name
+FROM files
+(SELECT * FROM customers_in_test_compute_data_sources_state)
+PREDICT Index
 WINDOW 8
 HORIZON 4;""",
     ),
diff --git a/sema4ai/vscode-client/src/extension.ts b/sema4ai/vscode-client/src/extension.ts
index 9e557026..38c029c7 100644
--- a/sema4ai/vscode-client/src/extension.ts
+++ b/sema4ai/vscode-client/src/extension.ts
@@ -164,6 +164,7 @@ import {
     SEMA4AI_GET_ACTIONS_METADATA,
     SEMA4AI_DROP_DATA_SOURCE,
     SEMA4AI_LIST_ACTIONS_INTERNAL,
+    SEMA4AI_SETUP_DATA_SOURCE,
 } from "./robocorpCommands";
 import { installWorkspaceWatcher } from "./pythonExtIntegration";
 import { refreshCloudTreeView } from "./viewsRobocorp";
@@ -216,6 +217,7 @@ import {
     verifyDataExtensionIsInstalled,
 } from "./dataExtension";
 import { QuickPickItemWithAction, showSelectOneQuickPick } from "./ask";
+import { setupDataSource } from "./robo/dataSourceHandling";
 
 interface InterpreterInfo {
     pythonExe: string;
@@ -525,6 +527,7 @@ function registerRobocorpCodeCommands(C: CommandRegistry, context: ExtensionCont
     C.register(SEMA4AI_CREATE_AGENT_PACKAGE, createAgentPackage);
     C.register(SEMA4AI_AGENT_PACKAGE_IMPORT, importAgentPackage);
     C.register(SEMA4AI_PACK_AGENT_PACKAGE, async (agentPath: string) => selectAndPackAgentPackage(agentPath));
+    C.register(SEMA4AI_SETUP_DATA_SOURCE, (robot?: RobotEntry) => setupDataSource(robot));
     C.register(SEMA4AI_OPEN_RUNBOOK_TREE_SELECTION, (robot: RobotEntry) => views.openRunbookTreeSelection(robot));
     C.register(SEMA4AI_UPDATE_AGENT_VERSION, async (agentPath: string) => updateAgentVersion(agentPath));
     C.register(SEMA4AI_REFRESH_AGENT_SPEC, async (agentPath: string) => refreshAgentSpec(agentPath));
diff --git a/sema4ai/vscode-client/src/protocols.ts b/sema4ai/vscode-client/src/protocols.ts
index 9b3db52c..0041241a 100644
--- a/sema4ai/vscode-client/src/protocols.ts
+++ b/sema4ai/vscode-client/src/protocols.ts
@@ -205,6 +205,10 @@ export interface DatasourceInfo {
     engine: string;
     model_name?: string;
     created_table?: string;
+    description?: string;
+    python_variable_name?: string;
+    setup_sql?: string | string[];
+    setup_sql_files?: string | string[];
 }
 
 export interface DiagnosticInfo {
diff --git a/sema4ai/vscode-client/src/robo/dataSourceHandling.ts b/sema4ai/vscode-client/src/robo/dataSourceHandling.ts
new file mode 100644
index 00000000..f5fdaafc
--- /dev/null
+++ b/sema4ai/vscode-client/src/robo/dataSourceHandling.ts
@@ -0,0 +1,40 @@
+import { commands, Uri, window } from "vscode";
+import { OUTPUT_CHANNEL } from "../channel";
+import { RobotEntry } from "../viewsCommon";
+import { DatasourceInfo } from "../protocols";
+import { langServer } from "../extension";
+import { DataServerConfig } from "./actionPackage";
+import { DATA_SERVER_START_COMMAND_ID } from "../dataExtension";
+
+export const setupDataSource = async (entry?: RobotEntry) => {
+    if (!entry || !entry.extraData || !entry.extraData.datasource) {
+        window.showErrorMessage("Data source not specified.");
+        return;
+    }
+
+    const dataServerInfo = (await commands.executeCommand(DATA_SERVER_START_COMMAND_ID, {
+        "showUIMessages": false,
+    })) as DataServerConfig | undefined;
+    if (!dataServerInfo) {
+        window.showErrorMessage(
+            "Unable to run (error getting local data server connection info and validating data sources):\n" +
+                JSON.stringify(dataServerInfo, null, 4)
+        );
+        return false;
+    }
+
+    OUTPUT_CHANNEL.appendLine("setupDataSource: " + JSON.stringify(entry));
+    const datasource: DatasourceInfo = entry.extraData.datasource;
+    const result = await langServer.sendRequest("setupDataSource", {
+        action_package_yaml_directory_uri: Uri.file(entry.robot.directory).toString(),
+        datasource: datasource,
+        data_server_info: dataServerInfo,
+    });
+    if (result["success"]) {
+        const messages = result["result"];
+        window.showInformationMessage(messages.join("\n"));
+    } else {
+        const error = result["message"];
+        window.showErrorMessage(error);
+    }
+};
diff --git a/sema4ai/vscode-client/src/robocorpCommands.ts b/sema4ai/vscode-client/src/robocorpCommands.ts
index fc7dfb7a..f2aa7b09 100644
--- a/sema4ai/vscode-client/src/robocorpCommands.ts
+++ b/sema4ai/vscode-client/src/robocorpCommands.ts
@@ -146,4 +146,5 @@ export const SEMA4AI_COLLAPSE_ALL_ENTRIES = "sema4ai.collapseAllEntries"; // Co
 export const SEMA4AI_IMPORT_ACTION_PACKAGE = "sema4ai.importActionPackage"; // Import Action Package
 export const SEMA4AI_RUN_ACTION_PACKAGE_DEV_TASK = "sema4ai.runActionPackageDevTask"; // Run dev-task (from Action Package)
 export const SEMA4AI_GET_ACTIONS_METADATA = "sema4ai.getActionsMetadata"; // Get Actions Metadata
-export const SEMA4AI_DROP_DATA_SOURCE = "sema4ai.dropDataSource"; // Drop Data Sources
\ No newline at end of file
+export const SEMA4AI_DROP_DATA_SOURCE = "sema4ai.dropDataSource"; // Drop Data Sources
+export const SEMA4AI_SETUP_DATA_SOURCE = "sema4ai.setupDataSource"; // Setup Data Source
\ No newline at end of file
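Illustrative sketch (not part of the patch): end to end, the inline tree button triggers sema4ai.setupDataSource, dataSourceHandling.ts starts the local data server, and the language server's m_setup_data_source does the actual work. The request shape below mirrors the one sent in test_setup_datasource; all concrete values are placeholders:

    import json

    # Placeholder values; a real client uses an initialized language server
    # connection and the DataServerConfigTypedDict from create_setup_config().
    request = {
        "jsonrpc": "2.0",
        "id": 1,
        "method": "setupDataSource",
        "params": {
            "action_package_yaml_directory_uri": "file:///path/to/action-package",
            "datasource": {
                "name": "files",
                "engine": "files",
                "created_table": "customers",
                "file": "files/customers.csv",
            },
            "data_server_info": {
                "api": {"http": {"host": "localhost", "port": "47334"}},
                "auth": {"username": "user", "password": "password"},
            },
        },
    }
    print(json.dumps(request, indent=2))
    # On success, the response's result["result"] is a DataSourceSetupResponse:
    # a list of human-readable messages (one per datasource handled).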