From 49e07e23a3731a826b9c2576d4572e7f6fd6faaa Mon Sep 17 00:00:00 2001
From: Levi Dooley
Date: Thu, 19 May 2022 16:27:28 -0500
Subject: [PATCH 1/8] Improve performance of the stored_queries function (#89)

Added an optional parameter to the StoredQuery constructor to pass in the
details of the query if they have already been retrieved. This is the case
in the stored_queries function, so we can take advantage of that and pass
in the details to save numerous HTTP calls.
---
 stardog/admin.py | 17 ++++++++++++-----
 1 file changed, 12 insertions(+), 5 deletions(-)

diff --git a/stardog/admin.py b/stardog/admin.py
index 00bdf0c..6e8a419 100644
--- a/stardog/admin.py
+++ b/stardog/admin.py
@@ -269,8 +269,10 @@ def stored_queries(self):
         r = self.client.get(
             "/admin/queries/stored", headers={"Accept": "application/json"}
         )
-        query_names = [q["name"] for q in r.json()["queries"]]
-        return list(map(lambda name: StoredQuery(name, self.client), query_names))
+        queries = r.json()["queries"]
+        return list(
+            map(lambda query: StoredQuery(query["name"], self.client, query), queries)
+        )

     def new_stored_query(self, name, query, options=None):
         """Creates a new Stored Query.
@@ -1249,7 +1251,7 @@ class StoredQuery(object):
     https://www.stardog.com/docs/#_managing_stored_queries
     """

-    def __init__(self, name, client):
+    def __init__(self, name, client, details=None):
         """Initializes a stored query.

         Use :meth:`stardog.admin.Admin.stored_query`,
@@ -1260,8 +1262,13 @@ def __init__(self, name, client):
         self.query_name = name
         self.client = client
         self.path = "/admin/queries/stored/{}".format(name)
-        self.details = {}
-        self.__refresh()
+
+        # We only need to call __refresh() if the details were not provided
+        if details is not None and isinstance(details, dict):
+            self.details = details
+        else:
+            self.details = {}
+            self.__refresh()

     def __refresh(self):
         details = self.client.get(self.path, headers={"Accept": "application/json"})

From 6bee7da43e3018321da12e5523ee4a5f040f2da6 Mon Sep 17 00:00:00 2001
From: Simon Cardenas
Date: Tue, 25 Oct 2022 15:11:45 -0300
Subject: [PATCH 2/8] Adds server-side data add
---
 dockerfiles/dockerfile-stardog |  1 +
 dockerfiles/example.ttl        |  1 +
 stardog/connection.py          | 46 +++++++++++++++++++++-------------
 test/test_integration.py       |  8 ++++++
 4 files changed, 39 insertions(+), 17 deletions(-)
 create mode 100644 dockerfiles/example.ttl

diff --git a/dockerfiles/dockerfile-stardog b/dockerfiles/dockerfile-stardog
index f661d18..7516297 100644
--- a/dockerfiles/dockerfile-stardog
+++ b/dockerfiles/dockerfile-stardog
@@ -16,6 +16,7 @@ COPY --from=base /tmp/mysql-connector-java.jar opt/stardog/server/dbms
 COPY stardog-license-key.bin /var/opt/stardog
 COPY start.sh /var/start.sh
 COPY start-standby.sh /var/start-standby.sh
+COPY example.ttl /tmp/example-remote.ttl

 USER root
 RUN yes $SSH_PASS | passwd $SSH_USER
diff --git a/dockerfiles/example.ttl b/dockerfiles/example.ttl
new file mode 100644
index 0000000..a55bc33
--- /dev/null
+++ b/dockerfiles/example.ttl
@@ -0,0 +1 @@
+ .
\ No newline at end of file
diff --git a/stardog/connection.py b/stardog/connection.py
index 7fbd20c..6dbd42a 100644
--- a/stardog/connection.py
+++ b/stardog/connection.py
@@ -125,32 +125,44 @@ def commit(self):
         self.client.post("/transaction/commit/{}".format(self.transaction))
         self.transaction = None

-    def add(self, content, graph_uri=None):
+    def add(self, content, graph_uri=None, server_side=False):
         """Adds data to the database. 
-        Args:
-          content (Content): Data to add
-          graph_uri (str, optional): Named graph into which to add the data
-
-        Raises:
-          stardog.exceptions.TransactionException
-            If not currently in a transaction
+        :param content: Data to add to a graph.
+        :type content: Content, str
+        :param graph_uri: Named graph into which to add the data.
+        :type graph_uri: str, optional
+        :param server_side: Whether the file to load resides on the remote server.
+        :type server_side: bool
+        :raises stardog.exceptions.TransactionException: If not currently in a transaction

         Examples:
-            >>> conn.add(File('example.ttl'), graph_uri='urn:graph')
+            Load example.ttl from the current directory:
+            >>> conn.add(File('example.ttl'), graph_uri='urn:graph')
+
+            Load /tmp/example.ttl, which exists on the remote Stardog server, into the default graph:
+            >>> conn.add(File('/tmp/example.ttl'), server_side=True)
         """
         self._assert_in_transaction()

-        with content.data() as data:
-            self.client.post(
-                "/{}/add".format(self.transaction),
-                params={"graph-uri": graph_uri},
-                headers={
+        args = {"params": {"graph-uri": graph_uri}}
+
+        if server_side:
+            args["headers"] = {
+                "Content-Type": "application/json",
+            }
+            args["json"] = {
+                "filename": content.fname,
+            }
+            self.client.post("/{}/add".format(self.transaction), **args)
+        else:
+            with content.data() as data:
+                args["headers"] = {
                     "Content-Type": content.content_type,
                     "Content-Encoding": content.content_encoding,
-                },
-                data=data,
-            )
+                }
+                args["data"] = data
+                self.client.post("/{}/add".format(self.transaction), **args)

     def remove(self, content, graph_uri=None):
         """Removes data from the database.
diff --git a/test/test_integration.py b/test/test_integration.py
index d91e1fa..499a340 100644
--- a/test/test_integration.py
+++ b/test/test_integration.py
@@ -375,6 +375,14 @@ def test_data_add_ttl_from_file(self):
             c.commit()
             assert self.expected_count(1)

+    def test_data_add_ttl_from_file_server_side(self):
+        db = self.db
+        with self.connection() as c:
+            c.begin()
+            c.add(stardog.content.File("/tmp/example-remote.ttl"), server_side=True)
+            c.commit()
+            assert self.expected_count(1)
+
     def test_data_add_ttl_from_content(self):
         db = self.db
         with self.connection() as c:

From a42ebf9834cfd156490e594a6e787d46fd7b91d0 Mon Sep 17 00:00:00 2001
From: Clark Farley
Date: Tue, 25 Oct 2022 16:08:41 -0400
Subject: [PATCH 3/8] Force Resource Existence when Creating Handle

Call appropriate functions depending on the resource type to ensure a
resource exists on the server when creating a handle. This ensures that
calling other functions using that handle will not throw an exception
due to a non-existent resource.

Resolves #85
---
 .gitignore               |  1 +
 stardog/admin.py         | 20 ++++++++++++++++++++
 test/test_admin.py       | 15 +++++++++++++++
 test/test_integration.py | 18 +++++++++++-------
 test/test_unit.py        | 11 ++++++++---
 5 files changed, 55 insertions(+), 10 deletions(-)

diff --git a/.gitignore b/.gitignore
index 61fb144..e29fad2 100644
--- a/.gitignore
+++ b/.gitignore
@@ -105,6 +105,7 @@ venv.bak/
 # macos stuff
 .idea/
 .DS_Store
+.vscode

 # sd license files
 stardog-license-key.bin
diff --git a/stardog/admin.py b/stardog/admin.py
index 6e8a419..5ee768d 100644
--- a/stardog/admin.py
+++ b/stardog/admin.py
@@ -7,6 +7,8 @@ import urllib
 from time import sleep

+from stardog.exceptions import StardogException
+
 from . 
import content_types as content_types from .http import client @@ -46,6 +48,8 @@ def __init__( username='admin', password='admin') """ self.client = client.Client(endpoint, None, username, password, auth=auth) + # ensure the server is alive and at the specified location + self.alive() def shutdown(self): """Shuts down the server.""" @@ -1059,6 +1063,9 @@ def __init__(self, name, client): self.client = client self.path = "/admin/databases/{}".format(name) + # this checks for existence by throwing an exception if the resource does not exist + self.client.get(self.path + "/options") + @property def name(self): """The name of the database.""" @@ -1263,6 +1270,9 @@ def __init__(self, name, client, details=None): self.client = client self.path = "/admin/queries/stored/{}".format(name) + # this checks for existence by throwing an exception if the resource does not exist + self.client.get(self.path) + # We only need to call __refresh() if the details are not provided if details is not None and isinstance(details, dict): self.details = details @@ -1354,6 +1364,8 @@ def __init__(self, name, client): self.username = name self.client = client self.path = "/admin/users/{}".format(name) + # this checks for existence by throwing an exception if the resource does not exist + self.client.get(self.path) @property def name(self): @@ -1537,6 +1549,8 @@ def __init__(self, name, client): self.role_name = name self.client = client self.path = "/admin/roles/{}".format(name) + # this checks for existence by throwing an exception if the resource does not exist + self.client.get(f"{self.path}/users") @property def name(self): @@ -1648,6 +1662,9 @@ def __init__(self, name, client): self.path = "/admin/virtual_graphs/{}".format(name) self.client = client + # this checks for existence by throwing an exception if the resource does not exist + self.client.get(f"{self.path}/info") + @property def name(self): """The name of the virtual graph.""" @@ -1786,6 +1803,9 @@ def __init__(self, name, client): self.path = "/admin/data_sources/{}".format(name) self.client = client + # this checks for existence by throwing an exception if the resource does not exist + self.client.get(f"{self.path}/info") + @property def name(self): """The name of the data source.""" diff --git a/test/test_admin.py b/test/test_admin.py index f73d58d..ec8d8e0 100644 --- a/test/test_admin.py +++ b/test/test_admin.py @@ -337,6 +337,10 @@ def test_databases(admin, conn_string, bulkload_content): def test_users(admin, conn_string): assert len(admin.users()) == len(DEFAULT_USERS) + # test non-existent user + with pytest.raises(exceptions.StardogException, match="User .* does not exist"): + admin.user("not a real user") + # new user user = admin.new_user("username", "password", False) @@ -393,6 +397,10 @@ def test_users(admin, conn_string): def test_roles(admin): assert len(admin.roles()) == len(DEFAULT_ROLES) + # test non-existent role + with pytest.raises(exceptions.StardogException, match="Role .* does not exist"): + admin.role("not a real role") + # users role = admin.role("reader") assert len(role.users()) > 0 @@ -638,6 +646,13 @@ def test_import(admin, conn_string, music_options, videos_options): assert 879 == count_records(bd.name, conn_string) +def test_data_source_does_not_exist(admin, music_options): + with pytest.raises( + exceptions.StardogException, match="There is no data source with name" + ): + admin.datasource("not a real data source") + + def test_data_source(admin, music_options): ds = admin.new_datasource("music", music_options) assert 
len(admin.datasources()) == 1 diff --git a/test/test_integration.py b/test/test_integration.py index 499a340..bea1a15 100644 --- a/test/test_integration.py +++ b/test/test_integration.py @@ -124,10 +124,11 @@ def db(self) -> stardog.admin.Database: @rtype: stardog.admin.Database """ - db = self.admin.database(self.db_name) + # db = self.admin.database(self.db_name) try: - db.drop() + # db.drop() + db = self.admin.database(self.db_name) except StardogException as e: if e.stardog_code != "0D0DU2": raise e @@ -172,12 +173,11 @@ def ds(self) -> stardog.admin.DataSource: @rtype: stardog.admin.DataSource """ - ds = self.admin.datasource(self.ds_name) try: - ds.delete() + self.admin.datasource(self.ds_name) except StardogException as e: - if e.http_code != 404: + if e.http_code != 400: raise e pass @@ -199,10 +199,9 @@ def vg(self) -> stardog.admin.VirtualGraph: @rtype: stardog.admin.VirtualGraph """ - ds = self.admin.virtual_graph(self.db_name) try: - ds.delete() + self.admin.virtual_graph(self.db_name) except StardogException as e: if e.stardog_code != "0D0DU2": raise e @@ -309,6 +308,11 @@ def test_bulkload(self): assert self.expected_count(6) assert self.expected_count(1, ng="") + def test_non_existent_db(self): + # test non-existent db + with pytest.raises(StardogException, match="does not exist"): + self.admin.database("not_real_db") + class TestDataSource(TestStardog): def test_datasource_creation(self): diff --git a/test/test_unit.py b/test/test_unit.py index be0b419..7b297f8 100644 --- a/test/test_unit.py +++ b/test/test_unit.py @@ -12,11 +12,12 @@ def test_materialize_graph_from_file_with_ds(self): from unittest.mock import patch, mock_open with patch("builtins.open", mock_open(read_data="data")) as mock_file: - with requests_mock.Mocker() as m: + with requests_mock.Mocker(real_http=True) as m: m.post( "http://localhost:5820/admin/virtual_graphs/import_db", status_code=204, ) + m.get("http://localhost:5820/admin/alive", status_code=200) admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") @@ -33,12 +34,13 @@ def test_materialize_graph_from_file_with_bad_ds(self): from unittest.mock import patch, mock_open with patch("builtins.open", mock_open(read_data="data")) as mock_file: - with requests_mock.Mocker() as m: + with requests_mock.Mocker(real_http=True) as m: m.post( "http://localhost:5820/admin/virtual_graphs/import_db", status_code=404, text="Data Source 'ds_sd_int_test' Not Found!", ) + m.get("http://localhost:5820/admin/alive", status_code=200) admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") @@ -75,6 +77,7 @@ def text_callback(request, context): status_code=200, text=text_callback, ) + m.get("http://localhost:5820/admin/alive", status_code=200) admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") @@ -87,7 +90,9 @@ def text_callback(request, context): ) def test_materialize_graph_missing_ds_or_options(self): - admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") + with requests_mock.Mocker() as m: + m.get("http://localhost:5820/admin/alive", status_code=200) + admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") try: admin.materialize_virtual_graph( From 346e2c421e885e91da6f1d60d287911638663ef8 Mon Sep 17 00:00:00 2001 From: Simon Cardenas Date: Mon, 14 Nov 2022 14:33:19 -0300 Subject: [PATCH 4/8] Refactor pystardog tests --- .circleci/config.yml | 134 ++++- README.md | 22 +- docker-compose.cluster.yml | 21 +- docker-compose.single-node.yml | 25 +- dockerfiles/dockerfile-python | 11 - 
 stardog/admin.py              |  18 +-
 test/conftest.py              | 182 +++++-
 test/pytest.ini               |  29 +
 test/test_admin.py            | 983 ---------------------------------
 test/test_admin_basic.py      | 870 +++++++++++++++++++++++++++++
 test/test_cache.py            | 217 ++++++++
 test/test_cluster.py          |  25 +
 test/test_connection.py       |  16 +-
 test/test_integration.py      | 620 ---------------------
 test/test_server_admin.py     |  80 +++
 test/test_single_node.py      |  11 +-
 test/test_standby.py          |  28 +
 test/test_unit.py             | 177 +++---
 test/test_utils.py            |   3 +-
 test/utils.py                 |  90 +++
 {utils => test/utils}/wait.sh |   9 +-
 utils/run_test_single_node.sh |  10 -
 utils/run_tests.sh            |   9 -
 23 files changed, 1805 insertions(+), 1785 deletions(-)
 delete mode 100644 dockerfiles/dockerfile-python
 create mode 100644 test/pytest.ini
 delete mode 100644 test/test_admin.py
 create mode 100644 test/test_admin_basic.py
 create mode 100644 test/test_cache.py
 create mode 100644 test/test_cluster.py
 delete mode 100644 test/test_integration.py
 create mode 100644 test/test_server_admin.py
 create mode 100644 test/test_standby.py
 create mode 100644 test/utils.py
 rename {utils => test/utils}/wait.sh (89%)
 delete mode 100755 utils/run_test_single_node.sh
 delete mode 100755 utils/run_tests.sh

diff --git a/.circleci/config.yml b/.circleci/config.yml
index fde420b..b7b1fc9 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,5 +1,66 @@
+---
 version: 2.1

+run_basic_admin_suite: &run_basic_admin_suite
+  run:
+    name: Runs the basic admin suite
+    no_output_timeout: 15m
+    command: |
+      set -x
+      pytest test/test_admin_basic.py -s
+
+run_connection_suite: &run_connection_suite
+  run:
+    name: Runs the test_connection suite
+    no_output_timeout: 15m
+    command: |
+      set -x
+      pytest test/test_connection.py -s
+
+run_test_server_admin_suite: &run_test_server_admin_suite
+  run:
+    name: Runs the test_server_admin suite
+    no_output_timeout: 15m
+    command: |
+      set -x
+      pytest test/test_server_admin.py -s
+
+run_unit_tests: &run_unit_tests
+  run:
+    name: Runs the test_unit suite
+    no_output_timeout: 15m
+    command: |
+      set -x
+      pytest test/test_unit.py -s
+
+run_utils_tests: &run_utils_tests
+  run:
+    name: Runs the test_utils suite
+    no_output_timeout: 15m
+    command: |
+      set -x
+      pytest test/test_utils.py -s
+
+setup_pytest: &setup_pytest
+  run:
+    name: Set up local env
+    command: |
+      # This is only required because CircleCI might not keep their machine executor completely up to date.
+      # apt-get update is failing because of this, hence we add this workaround. We don't use the Heroku CLI at all here.
+      # Main issue here https://discuss.circleci.com/t/heroku-gpg-issues-in-ubuntu-images/43834/3
+      sudo rm -rf /etc/apt/sources.list.d/heroku.list
+
+      sudo apt-get update -y
+      sudo apt-get install python3-pip -y
+      pip3 install -r test-requirements.txt -r requirements.txt
+
+
+pull_license: &pull_license
+  run:
+    name: Pull license
+    command: |
+      echo ${pystardog_license} | base64 --decode > ~/project/dockerfiles/stardog-license-key.bin

 jobs:
   format:
     docker:
@@ -12,52 +73,85 @@ jobs:
         pip install -r test-requirements.txt
         black --check . 
-  single_node_tests:
-    docker:
-      - image: cimg/base:2020.01
+  basic_test_suite_single_node:
+    machine:
+      image: ubuntu-2204:2022.10.2
     steps:
       - checkout
+      - <<: *pull_license
+
       - run:
-          name: Pull license
+          name: Brings up the single node and the required VGs
+          no_output_timeout: 15m
           command: |
-            echo ${pystardog_license} | base64 --decode > ~/project/dockerfiles/stardog-license-key.bin
+            set -x
+            docker-compose -f docker-compose.single-node.yml up -d

-      - setup_remote_docker
+      - run:
+          name: Waits for the Stardog stack to come up
+          command: |
+            source test/utils/wait.sh
+            wait_for_start_single_node localhost 5820
+
+      - <<: *setup_pytest
+      - <<: *run_basic_admin_suite
+      - <<: *run_connection_suite
+      - <<: *run_test_server_admin_suite
+      - <<: *run_utils_tests
+      - <<: *run_unit_tests

       - run:
-          name: Start containers and run single node test that are not compatible with cluster
+          name: Runs the single_node_only tests (these only work on single-node Stardog instances)
           no_output_timeout: 15m
           command: |
             set -x
-            docker-compose -f docker-compose.single-node.yml up --exit-code-from tests-single-node
+            pytest test/test_single_node.py --endpoint http://localhost:5820 -s

-
-  cluster_tests:
-    docker:
-      - image: cimg/base:2020.01
+  basic_test_cluster_mode:
+    machine:
+      image: ubuntu-2204:2022.10.2
     steps:
       - checkout
+      - <<: *pull_license
+
       - run:
-          name: Pull license
+          name: Start containers and cluster tests
+          no_output_timeout: 15m
           command: |
-            echo ${pystardog_license} | base64 --decode > ~/project/dockerfiles/stardog-license-key.bin
+            set -x
+            docker-compose -f docker-compose.cluster.yml up -d

-      - setup_remote_docker
+      - <<: *setup_pytest

       - run:
-          name: Start containers and cluster tests
+          name: Waits for the Stardog stack to come up
+          command: |
+            source test/utils/wait.sh
+            wait_for_start_cluster localhost 5820
+
+      - <<: *run_basic_admin_suite
+      - <<: *run_connection_suite
+      - <<: *run_test_server_admin_suite
+      - <<: *run_utils_tests
+      - <<: *run_unit_tests
+
+      - run:
+          name: Runs the cluster-only test suite
           no_output_timeout: 15m
           command: |
             set -x
-            docker-compose -f docker-compose.cluster.yml up --exit-code-from tests
+            pytest test/test_cluster.py --endpoint http://localhost:5820 -s

 workflows:
-  build_and_run_cluster_tests:
+  build_and_test:
     jobs:
       - format
-      - single_node_tests
-      - cluster_tests
+      - basic_test_suite_single_node
+      - basic_test_cluster_mode
+      # Have to re-enable the cache and standby tests
+      # - cache_tests
+      # - standby_tests
diff --git a/README.md b/README.md
index 83b0c74..79c545a 100644
--- a/README.md
+++ b/README.md
@@ -33,12 +33,30 @@ Docs](http://pystardog.readthedocs.io) or can be built using Sphinx:
 To run the tests locally, a valid Stardog license is required and placed in
 the `dockerfiles/stardog-license-key.bin`. Docker and docker-compose are also required.

+1) Bring up a Stardog instance using docker-compose. For testing about 90% of the pystardog features, a single node is sufficient,
+although we also provide a cluster setup for further testing.
 ```shell script
-    docker-compose -f docker-compose.single-node.yml up --exit-code-from tests-single-node
-    docker-compose -f docker-compose.cluster.yml up --exit-code-from tests
+# Bring up a single-node instance plus a set of Virtual Graphs for testing (recommended).
+docker-compose -f docker-compose.single-node.yml up -d

+# A cluster setup is also provided if cluster-only features are to be implemented and tested.
+docker-compose -f docker-compose.cluster.yml up -d
 ```

+Run the test suite. 
Create a virtual environment with the necessary dependencies:
+```shell script
+# Create a virtualenv and activate it
+virtualenv -p $(which python3) venv
+source venv/bin/activate
+
+# Install the dependencies
+pip install -r requirements.txt -r test-requirements.txt
+
+# Run the basic test suite (covers most of the pystardog functionality)
+pytest test/test_admin_basic.py test/test_connection.py test/test_utils.py -s
+```
+
+
 ## Format
 To run a format of all the files
 ```shell script
diff --git a/docker-compose.cluster.yml b/docker-compose.cluster.yml
index 3af9b4a..2d2c7bf 100644
--- a/docker-compose.cluster.yml
+++ b/docker-compose.cluster.yml
@@ -81,30 +81,13 @@ services:
       args:
         - TAG=${HAPROXY_TAG}
     container_name: ${STARDOG_LB}
+    ports:
+      - "127.0.0.1:5820:5820"
     depends_on:
       - zoo1
       - sd1
       - sd2

-  tests:
-    build:
-      context: .
-      dockerfile: dockerfiles/dockerfile-python
-    entrypoint: /bin/bash -c
-    command: ["./utils/run_tests.sh"]
-    container_name: pystardog_tests
-    environment:
-      - SSH_USER=${SSH_USER}
-      - SSH_PASS=${SSH_PASS}
-      - STARDOG_HOSTNAME_NODE_1=${STARDOG_HOSTNAME_NODE_1}
-      - STARDOG_HOSTNAME_CACHE=${STARDOG_HOSTNAME_CACHE}
-      - STARDOG_HOSTNAME_STANDBY=${STARDOG_HOSTNAME_STANDBY}
-      - STARDOG_ENDPOINT=http://${STARDOG_LB}:5820
-      - STARDOG_LB=${STARDOG_LB}
-      - STARDOG_USER=${STARDOG_USER}
-      - STARDOG_PASS=${STARDOG_PASS}
-
-
   # two mysql servers are used instead of one so we can simulate multiple datasources.
   mysql-music:
     build:
diff --git a/docker-compose.single-node.yml b/docker-compose.single-node.yml
index 1620dee..02d841c 100644
--- a/docker-compose.single-node.yml
+++ b/docker-compose.single-node.yml
@@ -5,8 +5,6 @@ version: "3.3"

 services:

-
-  # So another node that does not depends on zookeper.
   sd-single-node:
     build:
       context: dockerfiles/
@@ -17,15 +15,20 @@
     container_name: ${STARDOG_HOSTNAME_SINGLE_NODE}
     entrypoint: ["/bin/bash", "-c"]
     command: ["/var/start.sh"]
+    ports:
+      - "127.0.0.1:5820:5820"
+
+  # two mysql servers are used instead of one so we can simulate multiple datasources.
+  mysql-music:
+    build:
+      context: .
+      dockerfile: dockerfiles/dockerfile-mysql-music
+    # this value is hardcoded in conftest.py (music_options fixture), for multiple datasource / vg tests.
+    # it's also hardcoded in test/test_admin.py for testing imports.
+    container_name: pystardog_mysql_music

-  tests-single-node:
+  mysql-videos:
     build:
       context: .
-      dockerfile: dockerfiles/dockerfile-python
-    entrypoint: /bin/bash -c
-    command: ["./utils/run_test_single_node.sh"]
-    container_name: pystardog_tests_single_node
-    environment:
-      - STARDOG_ENDPOINT=${STARDOG_HOSTNAME_SINGLE_NODE}
-      - STARDOG_USER=${STARDOG_USER}
-      - STARDOG_PASS=${STARDOG_PASS}
+    dockerfile: dockerfiles/dockerfile-mysql-videos
+    container_name: pystardog_mysql_videos
\ No newline at end of file
diff --git a/dockerfiles/dockerfile-python b/dockerfiles/dockerfile-python
deleted file mode 100644
index f78ada0..0000000
--- a/dockerfiles/dockerfile-python
+++ /dev/null
@@ -1,11 +0,0 @@
-FROM python
-
-RUN apt-get update && apt-get install sshpass jq -y
-
-COPY . 
/var/pystardog
-
-WORKDIR /var/pystardog
-RUN pip install -r requirements.txt -r test-requirements.txt
-
-
-
diff --git a/stardog/admin.py b/stardog/admin.py
index 5ee768d..8023515 100644
--- a/stardog/admin.py
+++ b/stardog/admin.py
@@ -762,7 +762,8 @@ def validate(self):
         Returns:
           bool: The connection state
         """
-        self.client.get("/admin/users/valid")
+        r = self.client.get("/admin/users/valid")
+        return r.status_code == 200

     def cluster_list_standby_nodes(self):
         """
@@ -1691,10 +1692,16 @@ def update(self, name, mappings, options={}, datasource=None, db=None):
             meta["mappings"] = mappings
         if options is not None:
             meta["options"] = options
+
         if datasource is not None:
             meta["data_source"] = datasource
+        else:
+            meta["data_source"] = self.get_datasource()
+
         if db is not None:
             meta["db"] = db
+        else:
+            meta["db"] = self.get_database()

         self.client.put(self.path, json=meta)
         self.graph_name = name
@@ -1722,6 +1729,15 @@ def info(self):
         r = self.client.get(self.path + "/info")
         return r.json()["info"]

+    # should this return the object or the name?
+    def get_datasource(self):
+        """Gets the datasource associated with the VG.
+
+        :return: datasource name
+        """
+        return self.info()["data_source"].replace("data-source://", "")
+
+    # should this return the object or the name?
     def get_database(self):
         """Gets database associated with the VirtualGraph.

diff --git a/test/conftest.py b/test/conftest.py
index f7424a1..7f75d6d 100644
--- a/test/conftest.py
+++ b/test/conftest.py
@@ -1,8 +1,10 @@
 import pytest
+import stardog
 import stardog.content as content
 import stardog.content_types as content_types
 import os

+
 # STARDOG_ENDPOINT = os.environ.get('STARDOG_ENDPOINT', None)
 STARDOG_HOSTNAME_NODE_1 = os.environ.get("STARDOG_HOSTNAME_NODE_1", None)
 STARDOG_HOSTNAME_CACHE = os.environ.get("STARDOG_HOSTNAME_CACHE", None)
@@ -47,6 +49,7 @@ def ssl_verify(pytestconfig):
     return pytestconfig.getoption("ssl_verify")


+# this is currently not being used; need to confirm which one is definitive.
 @pytest.fixture
 def bulkload_content():
     contents = [
@@ -100,13 +103,174 @@ def music_options():
     return options


+@pytest.fixture()
+def admin(conn_string):
+    with stardog.admin.Admin(**conn_string) as admin:
+        yield admin
+
+
 @pytest.fixture
-def videos_options():
-    options = {
-        "jdbc.driver": "com.mysql.jdbc.Driver",
-        "jdbc.username": "user",
-        "jdbc.password": "pass",
-        "mappings.syntax": "STARDOG",
-        "jdbc.url": "jdbc:mysql://pystardog_mysql_videos/videos?useSSL=false",
-    }
-    return options
+def conn(conn_string, request):
+    endpoint = request.node.get_closest_marker("conn_endpoint", None)
+    username = request.node.get_closest_marker("conn_username", None)
+    password = request.node.get_closest_marker("conn_password", None)
+
+    if endpoint is not None:
+        endpoint = endpoint.args[0]
+        conn_string["endpoint"] = endpoint
+
+    if username is not None:
+        username = username.args[0]
+        conn_string["username"] = username
+
+    if password is not None:
+        password = password.args[0]
+        conn_string["password"] = password
+
+    dbname = request.node.get_closest_marker("conn_dbname", None)
+    if dbname is not None:
+        with stardog.connection.Connection(dbname.args[0], **conn_string) as c:
+            yield c
+    else:
+        raise Exception("A dbname must be passed with the marker: conn_dbname")
+
+
+# The generic datasource is the music datasource.
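+# Hedged usage sketch (illustrative only, not part of the suite; "my_music_ds"
+# is a hypothetical name): a test overrides the fixture defaults through the
+# markers declared in test/pytest.ini, e.g.
+#
+#   @pytest.mark.ds_name("my_music_ds")
+#   def test_datasource_name(datasource):
+#       assert datasource.name == "my_music_ds"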
+@pytest.fixture()
+def datasource(admin, music_options, request):
+    dsname = request.node.get_closest_marker("ds_name", None)
+    options = request.node.get_closest_marker("options", None)
+    if dsname is not None:
+        dsname = dsname.args[0]
+    else:
+        dsname = "datasource"
+
+    if options is not None:
+        options = options.args[0]
+    else:
+        options = music_options
+    ds = admin.new_datasource(dsname, options)
+    yield ds
+    ds.delete()
+
+
+@pytest.fixture()
+def virtual_graph(admin, music_options, datasource, request):
+    vgname = request.node.get_closest_marker("vgname", None)
+    mappings = request.node.get_closest_marker("mappings", None)
+    virtual_graph_options = request.node.get_closest_marker(
+        "virtual_graph_options", None
+    )
+    datasource_name = request.node.get_closest_marker("datasource_name", None)
+    database_name = request.node.get_closest_marker("database_name", None)
+    use_music_datasource = request.node.get_closest_marker("use_music_datasource", None)
+
+    if vgname is not None:
+        vgname = vgname.args[0]
+    else:
+        vgname = "virtual_graph"
+
+    if mappings is not None:
+        mappings = mappings.args[0]
+
+    if virtual_graph_options is not None:
+        virtual_graph_options = virtual_graph_options.args[0]
+
+    if database_name is not None:
+        database_name = database_name.args[0]
+
+    # unpack the marker value, as for the other markers above
+    if datasource_name is not None:
+        datasource_name = datasource_name.args[0]
+
+    if use_music_datasource:
+        datasource_name = datasource.name
+
+    if virtual_graph_options is None and use_music_datasource is None:
+        raise Exception(
+            "Must pass either a datasource or VG options. See pytest marks in the pytest documentation"
+        )
+
+    vg = admin.new_virtual_graph(
+        vgname,
+        mappings=mappings,
+        options=virtual_graph_options,
+        datasource=datasource_name,
+        db=database_name,
+    )
+    yield vg
+    vg.delete()
+
+
+@pytest.fixture()
+def user(admin, request):
+    username = request.node.get_closest_marker("user_username", None)
+    password = request.node.get_closest_marker("user_password", None)
+    if username is not None:
+        username = username.args[0]
+    else:
+        username = "pystardogUser"
+
+    if password is not None:
+        password = password.args[0]
+    else:
+        password = "pystardogPass"
+
+    user = admin.new_user(username, password, False)
+    yield user
+    user.delete()
+
+
+@pytest.fixture()
+def role(admin):
+    role = admin.new_role("writer")
+    yield role
+    role.delete()
+
+
+@pytest.fixture
+def db(admin, request):
+    dbname = request.node.get_closest_marker("dbname", None)
+    options = request.node.get_closest_marker("options", None)
+    contents = request.node.get_closest_marker("contents", None)
+    kwargs = request.node.get_closest_marker("kwargs", None)
+
+    if dbname is not None:
+        dbname = dbname.args[0]
+    else:
+        dbname = "pystardog-test-db"
+
+    if options is not None:
+        options = options.args[0]
+
+    if contents is not None:
+        contents = contents.args[0]
+
+    if kwargs is not None:
+        kwargs = kwargs.args[0]
+
+    if contents is None and kwargs is None:
+        db = admin.new_database(dbname, options)
+    if kwargs is None and contents is not None:
+        db = admin.new_database(dbname, options, *contents)
+    if kwargs is not None and contents is None:
+        db = admin.new_database(dbname, options, **kwargs)
+    if kwargs is not None and contents is not None:
+        db = admin.new_database(dbname, options, *contents, **kwargs)
+    yield db
+    db.drop()
+
+
+@pytest.fixture
+def stored_query(admin, request):
+    query_name = request.node.get_closest_marker("query_name", None)
+    query = request.node.get_closest_marker("query", None)
+
+    if query_name is not None:
+        query_name = query_name.args[0]
+    else:
+        query_name = "everything"
+
+    if query is not 
None: + query = query.args[0] + else: + query = "select * where { ?s ?p ?o . }" + stored_query = admin.new_stored_query(query_name, query) + yield stored_query + stored_query.delete() diff --git a/test/pytest.ini b/test/pytest.ini new file mode 100644 index 0000000..33bfe8d --- /dev/null +++ b/test/pytest.ini @@ -0,0 +1,29 @@ +[pytest] +markers = + # stored queries + query_name: name of a stored query + + # databases + dbname: name of a database to be created + options: db options + contents: contents to pass to the db + kwargs: allow extra options to database creation. (only used to add the copy_to_server flag) + + + # datasources: + ds_name: name of the datasource to be created + options: options of the datasource. + + # user: + user_username: name of the username to be create + user_password: password for the user to create. + + # VG + vgname: name of the virtual graph to be created + use_music_datasource: Whether we want to use the music datasource as default + virtual_graph_options: virtual graph options + mappings: mappings to be used in a VG creation + database_name: name of the DB to associate the VG with. + + # Connection + conn_dbname: database name to be used with the stardog client. \ No newline at end of file diff --git a/test/test_admin.py b/test/test_admin.py deleted file mode 100644 index ec8d8e0..0000000 --- a/test/test_admin.py +++ /dev/null @@ -1,983 +0,0 @@ -import pytest -import os -from time import sleep -import subprocess - -import stardog.admin -import stardog.connection as connection -import stardog.content as content -import stardog.exceptions as exceptions - -DEFAULT_USERS = ["admin", "anonymous"] -DEFAULT_ROLES = ["reader"] - -SSH_USER = os.environ["SSH_USER"] -SSH_PASS = os.environ["SSH_PASS"] -STARDOG_HOSTNAME_NODE_1 = os.environ["STARDOG_HOSTNAME_NODE_1"] -STARDOG_HOSTNAME_STANDBY = os.environ["STARDOG_HOSTNAME_STANDBY"] - -################## -# Help functions # -################## -def get_node_ip(node_hostname): - node_ip = subprocess.run( - [ - "sshpass", - "-p", - SSH_PASS, - "ssh", - "-o", - "StrictHostKeyChecking=no", - f"ssh://{SSH_USER}@{node_hostname}:2222", - "--", - "hostname", - "-I", - ], - stdout=subprocess.PIPE, - ) - return node_ip.stdout - - -def get_current_node_count(admin): - return len(admin.cluster_info()["nodes"]) - - -def wait_standby_node_to_join(admin): - standby_node_ip = ( - get_node_ip(STARDOG_HOSTNAME_STANDBY).decode("utf-8").strip() + ":5820" - ) - retries = 0 - while True: - try: - if standby_node_ip in admin.cluster_info()["nodes"]: - print(f"current nodes: {admin.cluster_info()['nodes']}") - break - else: - print( - "http call did not fail, but node is still not listed in cluster info" - ) - except Exception as e: - print( - f"An exception ocurred while connecting to the standby node: {e}, will keep retrying" - ) - print(f"retries for now: {retries}") - retries += 1 - sleep(20) - if retries >= 50: - raise Exception("Took too long for standby node to join the cluster") - - -def count_records(bd_name, conn_string): - with connection.Connection(bd_name, **conn_string) as conn: - graph_name = conn.select("select ?g { graph ?g {}}")["results"]["bindings"][0][ - "g" - ]["value"] - q = conn.select("SELECT * { GRAPH <" + graph_name + "> { ?s ?p ?o }}") - count = len(q["results"]["bindings"]) - return count - - -def wait_for_cleaning_cache_target(admin, cache_target_name): - retries = 0 - while True: - cache_targets = admin.cache_targets() - cache_target_names = [cache_target.name for cache_target in cache_targets] - if 
cache_target_name in cache_target_names: - retries += 1 - sleep(1) - if retries >= 20: - raise Exception( - "Took too long to remove cache target: " + cache_target_name - ) - else: - return - - -def wait_for_creating_cache_target(admin, cache_target_name): - retries = 0 - while True: - cache_targets = admin.cache_targets() - cache_target_names = [cache_target.name for cache_target in cache_targets] - if cache_target_name not in cache_target_names: - retries += 1 - sleep(1) - if retries >= 20: - raise Exception( - "Took too long to register cache target: " + cache_target_name - ) - else: - return - - -@pytest.fixture() -def admin(conn_string): - with stardog.admin.Admin(**conn_string) as admin: - - # IMO we should remove these. reason being, if by mistake a user decides to run this - # tests against their own stardog deployment, and they don't go trough the code first, - # they are risking dropping all databases, users, roles, and others. - - # alternatively we could warn somewhere (README, when running the tests, or other places) - # that this are not intended to run against any other stardog deployment that is not a clean one. - - databases = admin.databases() - if databases: - for db in admin.databases(): - db.drop() - - users = admin.users() - if users: - for user in users: - if user.name not in DEFAULT_USERS: - user.delete() - - roles = admin.roles() - if roles: - for role in roles: - if role.name not in DEFAULT_ROLES: - role.delete() - - stored_queries = admin.stored_queries() - if stored_queries: - for stored_query in stored_queries: - stored_query.delete() - - cache_targets = admin.cache_targets() - if cache_targets: - for cache in cache_targets: - cache.remove() - - yield admin - - -@pytest.fixture() -def datasource_music(admin, music_options): - ds = admin.new_datasource("newtest", music_options) - yield ds - ds.delete() - - -@pytest.fixture() -def virtual_graph_music(admin, datasource_music): - vg = admin.new_virtual_graph( - "test_vg", mappings="", datasource=datasource_music.name - ) - yield vg - vg.delete() - - -############## -# Admin tests# -############## - - -@pytest.mark.skip( - reason="Implementation is not well documented, https://stardog.atlassian.net/browse/PLAT-2946" -) -def test_cluster_diagnostic_report(admin): - admin.cluster_diagnostic_reports() - - -def test_cluster_readonly(admin): - admin.cluster_start_readonly() - - with pytest.raises(exceptions.StardogException, match="The cluster is read only"): - admin.new_database("fail_db") - - admin.cluster_stop_readonly() - - new_db = admin.new_database("fail_db") - new_db.drop() - - -def test_coordinator_check(admin, conn_string): - - coordinator_info = admin.cluster_info()["coordinator"] - coordinator_conn_string = conn_string - coordinator_conn_string["endpoint"] = "http://" + coordinator_info - - with stardog.admin.Admin(**coordinator_conn_string) as admin_coordinator_check: - assert admin_coordinator_check.cluster_coordinator_check() - - -# We should pass a standby admin object instead of a connection string. -def test_cluster_standby(admin, cluster_standby_node_conn_string): - - with stardog.admin.Admin(**cluster_standby_node_conn_string) as admin_standby: - assert admin_standby.standby_node_pause(pause=True) - assert admin_standby.standby_node_pause_status()["STATE"] == "PAUSED" - assert admin_standby.standby_node_pause(pause=False) - assert admin_standby.standby_node_pause_status()["STATE"] == "WAITING" - - # Join a standby node is still allowed even if it's not part of the registry. 
- admin_standby.cluster_join() - wait_standby_node_to_join(admin_standby) - - # Make sure the standby node is part of the cluster - standby_node_info = ( - get_node_ip(STARDOG_HOSTNAME_STANDBY).decode("utf-8").strip() + ":5820" - ) - assert standby_node_info in admin_standby.cluster_info()["nodes"] - - standby_nodes = admin_standby.cluster_list_standby_nodes() - node_id = standby_nodes["standbynodes"][0] - # removes a standby node from the registry, i.e from syncing with the rest of the cluster. - admin_standby.cluster_revoke_standby_access(standby_nodes["standbynodes"][0]) - standby_nodes_revoked = admin_standby.cluster_list_standby_nodes() - assert node_id not in standby_nodes_revoked["standbynodes"] - - -def test_get_server_metrics(admin): - assert "dbms.storage.levels" in admin.get_server_metrics() - - -def test_get_prometheus_metrics(admin): - assert "TYPE databases_planCache_size gauge" in admin.get_prometheus_metrics() - - -def test_get_metadata_properties(admin): - assert "database.archetypes" in admin.get_all_metadata_properties() - - -def test_alive(admin): - assert admin.alive() - - -def test_healthcheck(admin): - assert admin.healthcheck() - - -def test_backup_all(admin): - admin.backup_all() - - default_backup = subprocess.run( - [ - "sshpass", - "-p", - SSH_PASS, - "ssh", - "-o", - "StrictHostKeyChecking=no", - "ssh://" + SSH_USER + "@" + STARDOG_HOSTNAME_NODE_1 + ":2222", - "--", - "ls", - "-la", - "/var/opt/stardog/", - ], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - assert ".backup" in default_backup.stdout - - admin.backup_all(location="/tmp") - tmp_backup = subprocess.run( - [ - "sshpass", - "-p", - SSH_PASS, - "ssh", - "-o", - "StrictHostKeyChecking=no", - "ssh://" + SSH_USER + "@" + STARDOG_HOSTNAME_NODE_1 + ":2222", - "--", - "ls", - "-l", - "/tmp", - ], - stdout=subprocess.PIPE, - universal_newlines=True, - ) - assert "meta" in tmp_backup.stdout - - -# DEPRECATED, test moved to test_integration -def test_databases(admin, conn_string, bulkload_content): - current_databases = len(admin.databases()) - - # create database - db = admin.new_database("db", {"search.enabled": True, "spatial.enabled": True}) - - assert len(admin.databases()) == current_databases + 1 - assert db.name == "db" - assert db.get_options("search.enabled", "spatial.enabled") == { - "search.enabled": True, - "spatial.enabled": True, - } - - # change options - db.offline() - db.set_options({"spatial.enabled": False}) - db.online() - - assert db.get_options("search.enabled", "spatial.enabled") == { - "search.enabled": True, - "spatial.enabled": False, - } - - # optimize - db.optimize() - - # verify - db.verify() - - bl = admin.new_database("bulkload", {}, *bulkload_content, copy_to_server=True) - - with connection.Connection("bulkload", **conn_string) as c: - q = c.select("select * where { graph {?s ?p ?o}}") - assert len(q["results"]["bindings"]) == 1 - assert c.size() == 7 - - # clear - db.drop() - bl.drop() - - assert len(admin.databases()) == current_databases - - -def test_users(admin, conn_string): - assert len(admin.users()) == len(DEFAULT_USERS) - - # test non-existent user - with pytest.raises(exceptions.StardogException, match="User .* does not exist"): - admin.user("not a real user") - - # new user - user = admin.new_user("username", "password", False) - - assert len(admin.users()) == len(DEFAULT_USERS) + 1 - assert not user.is_superuser() - assert user.is_enabled() - - # check if able to connect - with stardog.admin.Admin(**conn_string) as uadmin: - uadmin.validate() - - # 
change password - user.set_password("new_password") - with stardog.admin.Admin( - endpoint=conn_string["endpoint"], username="username", password="new_password" - ) as uadmin: - uadmin.validate() - - # disable/enable - user.set_enabled(False) - assert not user.is_enabled() - user.set_enabled(True) - assert user.is_enabled() - - # roles - assert len(user.roles()) == 0 - - user.add_role("reader") - roles = user.roles() - assert len(user.roles()) == 1 - - user.set_roles(*roles) - assert len(user.roles()) == 1 - - user.remove_role("reader") - assert len(user.roles()) == 0 - - # permissions - assert user.permissions() == [ - {"action": "READ", "resource_type": "user", "resource": ["username"]}, - {"action": "WRITE", "resource_type": "user", "resource": ["username"]}, - ] - assert user.effective_permissions() == [ - {"action": "READ", "resource_type": "user", "resource": ["username"]}, - {"action": "WRITE", "resource_type": "user", "resource": ["username"]}, - ] - - # delete user - user.delete() - - assert len(admin.users()) == len(DEFAULT_USERS) - - -def test_roles(admin): - assert len(admin.roles()) == len(DEFAULT_ROLES) - - # test non-existent role - with pytest.raises(exceptions.StardogException, match="Role .* does not exist"): - admin.role("not a real role") - - # users - role = admin.role("reader") - assert len(role.users()) > 0 - - # new role - role = admin.new_role("writer") - assert len(admin.roles()) == len(DEFAULT_ROLES) + 1 - - # permissions - assert role.permissions() == [] - - role.add_permission("WRITE", "*", "*") - assert role.permissions() == [ - {"action": "WRITE", "resource_type": "*", "resource": ["*"]} - ] - - role.remove_permission("WRITE", "*", "*") - assert role.permissions() == [] - - # remove role - role.delete() - - assert len(admin.roles()) == len(DEFAULT_ROLES) - - -def test_queries(admin): - assert len(admin.queries()) == 0 - - with pytest.raises(exceptions.StardogException, match="Query not found: 1"): - admin.query(1) - - with pytest.raises(exceptions.StardogException, match="Query not found: 1"): - admin.kill_query(1) - - -def test_stored_queries(admin): - query = "select * where { ?s ?p ?o . 
}" - assert len(admin.stored_queries()) == 0 - - with pytest.raises(exceptions.StardogException, match="Stored query not found"): - admin.stored_query("not a real stored query") - - # add a stored query - stored_query = admin.new_stored_query("everything", query) - assert "everything" in [sq.name for sq in admin.stored_queries()] - - # get a stored query - stored_query_copy = admin.stored_query("everything") - assert stored_query_copy.query == query - - # update a stored query - assert stored_query.description is None - stored_query.update(description="get all the triples") - assert stored_query.description == "get all the triples" - - # delete a stored query - stored_query.delete() - assert len(admin.stored_queries()) == 0 - - # clear the stored queries - stored_query = admin.new_stored_query("everything", query) - stored_query = admin.new_stored_query("everything2", query) - assert len(admin.stored_queries()) == 2 - admin.clear_stored_queries() - assert len(admin.stored_queries()) == 0 - - -def test_vg_update(admin, music_options): - - ds = admin.new_datasource("music", music_options) - vg = admin.new_virtual_graph("test_vg", mappings="", datasource=ds.name) - assert "mappings.syntax" not in vg.options() - - vg.update( - "test_vg", mappings="", options={"mappings.syntax": "SMS2"}, datasource=ds.name - ) - assert "mappings.syntax" in vg.options() - - vg.delete() - ds.delete() - - -def test_vg_no_options(admin, music_options): - ds = admin.new_datasource("music", music_options) - vg = admin.new_virtual_graph("test_vg", mappings="", datasource=ds.name) - # namespace is the "default" option, so passing no options will still generate a namespace option - assert len(vg.options()) == 1 - vg.delete() - ds.delete() - - -def test_associate_vg_with_db(admin, music_options): - ds = admin.new_datasource("music", music_options) - vg = admin.new_virtual_graph( - "test_vg", mappings="", datasource=ds.name, db="somedb" - ) - assert "somedb" == vg.info()["database"] - vg.delete() - ds.delete() - - -def test_create_vg_with_data_source_specified_in_options(admin, music_options): - vg = admin.new_virtual_graph("test_vg", mappings="", options=music_options) - # namespace is a default option, so final option count will be 1 + number of options added - assert len(vg.options()) > 1 - vg.delete() - - -def test_vg_mappings(admin, music_options): - vg = admin.new_virtual_graph("test_vg", mappings="", options=music_options) - - # default is STARDOG - assert ( - "@prefix : ." - == vg.mappings_string().decode("utf-8")[0:37] - ) - - # we test the first string of the entire response, as the rest of the response contains randomly generated - # strings each time a mapping is generated, hence we can't know beforehand what to compare it to. - # we assume that if the first line of the response is what we expect, the rest of the mappings are retrieved successfully as well. - assert ( - "@prefix : ." - == vg.mappings_string("R2RML").decode("utf-8")[0:37] - ) - - vg.delete() - - -def test_create_vg_with_custom_mappings(admin, music_options): - vg = admin.new_virtual_graph( - "test_vg", - mappings=content.File("test/data/music_mappings.ttl"), - options=music_options, - ) - assert ( - "PREFIX : " - == vg.mappings_string().decode("utf-8")[0:39] - ) - vg.delete() - - -# this might be deprecated later, but we test it until then. -def test_mappings_old(admin, music_options): - vg = admin.new_virtual_graph("test_vg", mappings="", options=music_options) - assert ( - "@prefix : ." 
== vg.mappings().decode("utf-8")[0:37] - ) - vg.delete() - - -def test_datasource_preffered_over_options_for_vg_creation( - admin, music_options, conn_string -): - bad_options = { - "jdbc.driver": "com.mysql.jdbc.Driver", - "jdbc.username": "non-existent", - "jdbc.password": "non-existent", - "jdbc.url": "jdbc:mysql://non-existent", - } - - # querying a VG requires an existing BD. - admin.new_database("temp_bd") - ds = admin.new_datasource("music", music_options) - vg = admin.new_virtual_graph( - "test_vg", mappings="", options=bad_options, datasource=ds.name - ) - - with connection.Connection("temp_bd", **conn_string) as conn: - res = conn.select("SELECT * {GRAPH { ?s ?p ?o }} LIMIT 1") - assert ( - "http://api.stardog.com/Artist/id=1" - == res["results"]["bindings"][0]["s"]["value"] - ) - - vg.delete() - ds.delete() - - -def test_extra_options_should_be_passed_to_vg(admin, music_options): - ds = admin.new_datasource("music", music_options) - vg = admin.new_virtual_graph( - "test_vg", - mappings="", - options={"mappings.syntax": "SMS2"}, - datasource=ds.name, - db="no-db", - ) - assert "mappings.syntax" in vg.options() - vg.delete() - ds.delete() - - -def test_should_fail_when_no_datasource_is_passed(admin): - with pytest.raises( - exceptions.StardogException, match="Unable to determine data source type" - ): - admin.new_virtual_graph("vg", content.File("test/data/r2rml.ttl")) - - -def test_should_fail_if_vg_does_not_exists(admin): - with pytest.raises( - exceptions.StardogException, match="Virtual Graph non-existent Not Found!" - ): - vg = admin.virtual_graph("non-existent") - vg.available() - - -def test_import(admin, conn_string, music_options, videos_options): - bd = admin.new_database("test-db") - graph_name = "test-graph" - - # tests passing mappings - admin.import_virtual_graph( - "test-db", - mappings=content.File("test/data/music_mappings.ttl"), - named_graph=graph_name, - remove_all=True, - options=music_options, - ) - # specified mapping file generates a graph with total of 37 triples - assert 37 == count_records(bd.name, conn_string) - - # tests passing empty mappings - admin.import_virtual_graph( - "test-db", - mappings="", - named_graph=graph_name, - remove_all=True, - options=music_options, - ) - # if mapping is not specified, music bd generates a graph with 79 triples - assert 79 == count_records(bd.name, conn_string) - - # test removing_all false, it should return music records + video records. - admin.import_virtual_graph( - "test-db", - mappings="", - named_graph=graph_name, - remove_all=False, - options=videos_options, - ) - # if no mapping is specified, videos db generates a graph with 800 triples. adding un-mapped music sums up to 879. 
- assert 879 == count_records(bd.name, conn_string) - - -def test_data_source_does_not_exist(admin, music_options): - with pytest.raises( - exceptions.StardogException, match="There is no data source with name" - ): - admin.datasource("not a real data source") - - -def test_data_source(admin, music_options): - ds = admin.new_datasource("music", music_options) - assert len(admin.datasources()) == 1 - assert ds.name == "music" - assert ds.get_options() == music_options - ds.delete() - assert len(admin.datasources()) == 0 - - -def test_cache_targets(admin, cache_target_info): - - cache_target_name = cache_target_info["target_name"] - cache_target_hostname = cache_target_info["hostname"] - cache_target_port = cache_target_info["port"] - cache_target_username = cache_target_info["username"] - cache_target_password = cache_target_info["password"] - - cache_targets = admin.cache_targets() - assert len(cache_targets) == 0 - - cache_target = admin.new_cache_target( - cache_target_name, - cache_target_hostname, - cache_target_port, - cache_target_username, - cache_target_password, - ) - wait_for_creating_cache_target(admin, cache_target_name) - cache_targets = admin.cache_targets() - assert len(cache_targets) == 1 - - assert cache_target.info()["name"] == cache_target_name - assert cache_target.info()["port"] == cache_target_port - assert cache_target.info()["hostname"] == cache_target_hostname - assert cache_target.info()["username"] == cache_target_username - - # test remove() - cache_target.remove() - wait_for_cleaning_cache_target(admin, cache_target.name) - cache_targets = admin.cache_targets() - assert len(cache_targets) == 0 - - # tests orphan - cache_target = admin.new_cache_target( - cache_target_name, - cache_target_hostname, - cache_target_port, - cache_target_username, - cache_target_password, - ) - wait_for_creating_cache_target(admin, cache_target_name) - cache_target.orphan() - wait_for_cleaning_cache_target(admin, cache_target.name) - cache_targets = admin.cache_targets() - assert len(cache_targets) == 0 - # recall that removing a cache is an idempotent operation, and will always (unless it fails) - # return that cache target was removed, even if it doesn't exists in the first place - # Removing a an orphaned cache target will not delete the data, because the target is already orphaned - # We need to recreate the orphaned cached target (using use_existing_db) in order to fully delete its data - cache_target = admin.new_cache_target( - cache_target_name, - cache_target_hostname, - cache_target_port, - cache_target_username, - cache_target_password, - use_existing_db=True, - ) - wait_for_creating_cache_target(admin, cache_target_name) - cache_target.remove() - - -def test_cache_ng_datasets(admin, bulkload_content, cache_target_info): - - cache_target_name = cache_target_info["target_name"] - cache_target_hostname = cache_target_info["hostname"] - cache_target_port = cache_target_info["port"] - cache_target_username = cache_target_info["username"] - cache_target_password = cache_target_info["password"] - - cache_target = admin.new_cache_target( - cache_target_name, - cache_target_hostname, - cache_target_port, - cache_target_username, - cache_target_password, - ) - wait_for_creating_cache_target(admin, cache_target_name) - - bl = admin.new_database("bulkload", {}, *bulkload_content, copy_to_server=True) - - assert len(admin.cached_graphs()) == 0 - cached_graph_name = "cache://cached-ng" - cached_graph = admin.new_cached_graph( - cached_graph_name, cache_target.name, "urn:context", 
bl.name - ) - - assert len(admin.cached_graphs()) == 1 - - cached_graph_status = cached_graph.status() - assert cached_graph_status[0]["name"] == cached_graph_name - assert cached_graph_status[0]["target"] == cache_target.name - cached_graph.refresh() - cached_graph.drop() - assert len(admin.cached_graphs()) == 0 - - -def test_cache_vg_datasets(admin, music_options, cache_target_info): - - cache_target_name = cache_target_info["target_name"] - cache_target_hostname = cache_target_info["hostname"] - cache_target_port = cache_target_info["port"] - cache_target_username = cache_target_info["username"] - cache_target_password = cache_target_info["password"] - - # creating a VG using empty mappings, and specifying a datasource - ds = admin.new_datasource("music", music_options) - vg = admin.new_virtual_graph("test_vg", mappings="", datasource=ds.name) - - cache_target = admin.new_cache_target( - cache_target_name, - cache_target_hostname, - cache_target_port, - cache_target_username, - cache_target_password, - ) - wait_for_creating_cache_target(admin, cache_target_name) - # We need to register the VG into the cache target as well. - - conn_string_cache = { - "endpoint": "http://" + cache_target_hostname + ":5820", - "username": "admin", - "password": "admin", - } - - with stardog.admin.Admin(**conn_string_cache) as admin_cache_target: - ds_cached = admin_cache_target.new_datasource("music", music_options) - vg_cached = admin_cache_target.new_virtual_graph( - "test_vg", mappings="", datasource=ds.name - ) - - assert len(admin.cached_graphs()) == 0 - - cached_graph_name = "cache://cached-vg" - cached_graph = admin.new_cached_graph( - cached_graph_name, cache_target.name, "virtual://" + vg.name - ) - - assert len(admin.cached_graphs()) == 1 - - cached_graph_status = cached_graph.status() - assert cached_graph_status[0]["name"] == cached_graph_name - assert cached_graph_status[0]["target"] == cache_target.name - cached_graph.refresh() - cached_graph.drop() - assert len(admin.cached_graphs()) == 0 - - cache_target.remove() - wait_for_cleaning_cache_target(admin, cache_target.name) - - vg.delete() - ds.delete() - - -@pytest.mark.skip( - reason="Caching queries is no longer supported. 
We are skipping we but should make sure it still works for older SD versions" -) -def test_cache_query_datasets(admin, bulkload_content, cache_target_info): - - cache_target_name = cache_target_info["target_name"] - cache_target_hostname = cache_target_info["hostname"] - cache_target_port = cache_target_info["port"] - cache_target_username = cache_target_info["username"] - cache_target_password = cache_target_info["password"] - - cache_target = admin.new_cache_target( - cache_target_name, - cache_target_hostname, - cache_target_port, - cache_target_username, - cache_target_password, - ) - wait_for_creating_cache_target(admin, cache_target_name) - - bl = admin.new_database("bulkload", {}, *bulkload_content, copy_to_server=True) - - assert len(admin.cached_queries()) == 0 - - cached_query_name = "cache://my_new_query_cached" - - cached_query = admin.new_cached_query( - cached_query_name, cache_target.name, "SELECT * { ?s ?p ?o }", bl.name - ) - # wait_for_creating_cache_dataset(admin, cached_query_name, 'query') - - assert len(admin.cached_queries()) == 1 - - cached_query_status = cached_query.status() - assert cached_query_status[0]["name"] == cached_query_name - assert cached_query_status[0]["target"] == cache_target.name - cached_query.refresh() - cached_query.drop() - assert len(admin.cached_queries()) == 0 - - cache_target.remove() - wait_for_cleaning_cache_target(admin, cache_target.name) - - -def test_import_namespaces(admin): - # we want to tests more than 1 file format - namespaces_ttl = content.File("test/data/namespaces.ttl") - namespaces_rdf = content.File("test/data/namespaces.xml") - - db = admin.new_database("test_db") - db_default_namespaces = db.namespaces() - ns_default_count = len(db_default_namespaces) - - # number of namespaces is always 6 by default for any new database - # https://docs.stardog.com/operating-stardog/database-administration/managing-databases#namespaces - assert ns_default_count == 6 - - # imports 4 namespaces - db.import_namespaces(namespaces_ttl) - ttl_ns_count = len(db.namespaces()) - assert ns_default_count + 4 == ttl_ns_count - - # imports 2 namespaces - db.import_namespaces(namespaces_rdf) - rdf_ns_count = len(db.namespaces()) - assert ttl_ns_count + 2 == rdf_ns_count - - db.drop() - - -def test_add_and_delete_namespaces(admin): - - db = admin.new_database("test_db") - - # number of namespaces is always 6 by default for any new database - assert len(db.namespaces()) == 6 - - db.add_namespace("testns", "my:test:IRI") - assert len(db.namespaces()) == 7 - - # tests a failure while adding an existing namespace - with pytest.raises(Exception, match="Namespace already exists for this database"): - db.add_namespace("stardog", "someiri") - - db.remove_namespace("testns") - assert len(db.namespaces()) == 6 - - # tests a failure while removing an existing namespace - with pytest.raises(Exception, match="Namespace does not exists for this database"): - db.remove_namespace("non-existent-ns") - - # tests insertion of a pair of namespaces that is a substring of the first - db.add_namespace("testnspace", "my:test:IRI") - db.add_namespace("testns", "my:test:IRI") - - assert len(db.namespaces()) == 8 - - # tests removal of the correct namespace, even if a similar namespace exists - db.remove_namespace("testns") - db.remove_namespace("testnspace") - - assert len(db.namespaces()) == 6 - - db.drop() - - -def test_database_exists_in_databases_list(admin): - db = admin.new_database("my_db") - all_databases = admin.databases() - assert db in all_databases - db.drop() - - 
-def test_datasource_exists_in_datasource_list(admin, datasource_music):
-    all_datasources = admin.datasources()
-    assert datasource_music in all_datasources
-
-
-@pytest.mark.skip(
-    reason="We need to get sorted whether we want users to deal with prefix:// for vg/ds operations"
-)
-def test_vg_exists_in_vg_list(admin, virtual_graph_music):
-    all_vgs = admin.virtual_graphs()
-    assert virtual_graph_music in all_vgs
-
-
-def test_user_exists_in_user_list(admin):
-    user = admin.new_user("username", "password", False)
-    all_users = admin.users()
-    assert user in all_users
-    user.delete()
-
-
-def test_role_exists_in_role_list(admin):
-    role = admin.new_role("myrole")
-    all_roles = admin.roles()
-    assert role in all_roles
-    role.delete()
-
-
-def test_stored_query_in_stored_query_list(admin):
-    query = "select * where { ?s ?p ?o . }"
-    stored_query = admin.new_stored_query("everything", query)
-    assert stored_query in admin.stored_queries()
-
-
-def test_cache_target_in_cache_target_list(admin, cache_target_info):
-    cache_target_name = cache_target_info["target_name"]
-    cache_target_hostname = cache_target_info["hostname"]
-    cache_target_port = cache_target_info["port"]
-    cache_target_username = cache_target_info["username"]
-    cache_target_password = cache_target_info["password"]
-
-    cache_targets = admin.cache_targets()
-    assert len(cache_targets) == 0
-
-    cache_target = admin.new_cache_target(
-        cache_target_name,
-        cache_target_hostname,
-        cache_target_port,
-        cache_target_username,
-        cache_target_password,
-    )
-    wait_for_creating_cache_target(admin, cache_target_name)
-    assert cache_target in admin.cache_targets()
diff --git a/test/test_admin_basic.py b/test/test_admin_basic.py
new file mode 100644
index 0000000..6abbae8
--- /dev/null
+++ b/test/test_admin_basic.py
@@ -0,0 +1,870 @@
+import os
+import re
+from enum import Enum
+
+import pytest
+from stardog import admin, connection, content, content_types, exceptions
+
+###############################################################
+#
+# These tests can be run against a cluster or standalone server
+#
+###############################################################
+from stardog.exceptions import StardogException
+
+default_users = ["admin", "anonymous"]
+default_roles = ["reader"]
+
+
+class Resource(Enum):
+    DB = "db_sd_int_test"
+    DS = "ds_sd_int_test"
+    VG = "vg_sd_int_test"
+    NG = "http://example.org/graph"
+
+
+class TestStardog:
+    is_local = (
+        True
+        if "localhost" in os.environ.get("STARDOG_ENDPOINT", "http://localhost:5820")
+        and not os.path.exists("/.dockerenv")
+        else False
+    )
+
+    def setup_method(self, test_method):
+        """
+        Before each test, a fresh Stardog admin and credential object will be provided, just in case the previous one got corrupted.
+
+        @rtype: None
+
+        """
+
+        if not os.path.isdir("data") and not os.path.islink("data"):
+            os.symlink("test/data", "data")
+
+    # This is too inefficient for running all tests: we don't want to clean up everything after every single test,
+    # we only want to clean up the resources we created.
+    # We should also use fixtures for creating resources, so that we can create and clean up in the same test;
+    # teardown must be managed by fixtures (see the sketch just below).
+    def teardown_method(self, test_method):
+        """
+        After each test this will destroy all resources on the instance.
+
+        @rtype: None
+        """
+
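As the comments above suggest, a fixture can own the whole create/use/drop cycle. A minimal sketch of what such a conftest.py fixture could look like, assuming a `conn_string` fixture like the one these tests request (the real `db` fixture and its handling of the `dbname` marker are not part of this patch):

import pytest

import stardog.admin


@pytest.fixture
def db(conn_string, request):
    # Pick the database name from an optional @pytest.mark.dbname(...) marker.
    marker = request.node.get_closest_marker("dbname")
    name = marker.args[0] if marker else "pystardog-test-database"
    with stardog.admin.Admin(**conn_string) as admin:
        database = admin.new_database(name)
        # Hand the handle to the test, then drop it during teardown.
        yield database
        database.drop()

With this shape, creation and cleanup live in one place and each test only declares the name it needs via the marker.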
+    # Maybe these two helpers can be merged, as they do virtually the same thing.
+    def expected_count(self, conn, expected=1, ng: str = "stardog:context:default"):
+        q = conn.select(f"select * where {{ graph {ng} {{?s ?p ?o}} }}")
+        return len(q["results"]["bindings"]) == expected
+
+    def count_records(self, db_name, conn_string):
+        with connection.Connection(db_name, **conn_string) as conn:
+            graph_name = conn.select("select ?g { graph ?g {}}")["results"]["bindings"][
+                0
+            ]["g"]["value"]
+            q = conn.select("SELECT * { GRAPH <" + graph_name + "> { ?s ?p ?o }}")
+            count = len(q["results"]["bindings"])
+            return count
+
+    @property
+    def ng(self) -> str:
+        """
+        This method returns the default named-graph string.
+
+        @return: str
+        """
+        return Resource.NG.value
+
+    ################################################################################################################
+    #
+    # Database helpers
+    #
+    ################################################################################################################
+
+    # This is redefined too many times. Need to choose one and stick to it (as a fixture, and as a private method in the database class).
+    @property
+    def music_options(self):
+        if TestStardog.is_local:
+            return {
+                "jdbc.url": "jdbc:sqlite:/tmp/music.db",
+                "jdbc.username": "whatever",
+                "jdbc.password": "whatever",
+                "jdbc.driver": "org.sqlite.JDBC",
+                "sql.default.schema": "main",
+                "sql.defaults": "main",
+                "sql.skip.validation": "true",
+                "sql.dialect": "POSTGRESQL",
+            }
+        else:
+            return {
+                "jdbc.driver": "com.mysql.jdbc.Driver",
+                "jdbc.username": "user",
+                "jdbc.password": "pass",
+                "mappings.syntax": "STARDOG",
+                "jdbc.url": "jdbc:mysql://pystardog_mysql_music/music?useSSL=false",
+            }
+
+
+class TestUsers(TestStardog):
+    def test_user_creation(self, admin, user):
+        assert len(admin.users()) == len(default_users) + 1
+        assert not user.is_superuser()
+        assert user.is_enabled()
+
+    @pytest.mark.user_username("userCanChangePass")
+    @pytest.mark.user_password("userCanChangePass")
+    def test_user_can_change_password(self, conn_string, user):
+        user.set_password("new_password")
+        with admin.Admin(
+            endpoint=conn_string["endpoint"],
+            username="userCanChangePass",
+            password="new_password",
+        ) as admin_as_user:
+            assert admin_as_user.validate()
+
+    @pytest.mark.user_username("userCanValidate")
+    @pytest.mark.user_password("userCanValidate")
+    def test_new_user_can_connect(self, conn_string, user):
+        with admin.Admin(
+            endpoint=conn_string["endpoint"],
+            username="userCanValidate",
+            password="userCanValidate",
+        ) as admin_as_user:
+            assert admin_as_user.validate()
+
+    def test_disable_enable_user(self, user):
+        user.set_enabled(False)
+        assert not user.is_enabled()
+        user.set_enabled(True)
+        assert user.is_enabled()
+
+    def test_user_roles(self, user):
+        assert len(user.roles()) == 0
+
+        user.add_role("reader")
+        roles = user.roles()
+        assert len(user.roles()) == 1
+
+        user.set_roles(*roles)
+        assert len(user.roles()) == 1
+
+        user.remove_role("reader")
+        assert len(user.roles()) == 0
+
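The `user_username`/`user_password` markers used below presumably drive the `user` fixture the same way `dbname` drives `db`. A sketch under that assumption (the actual fixture is not shown in this patch, so names and defaults here are hypothetical):

import pytest


@pytest.fixture
def user(admin, request):
    # Read credentials from optional markers, falling back to defaults.
    username_marker = request.node.get_closest_marker("user_username")
    password_marker = request.node.get_closest_marker("user_password")
    username = username_marker.args[0] if username_marker else "pystardog-test-user"
    password = password_marker.args[0] if password_marker else "password"
    # Third positional argument is the superuser flag, as used elsewhere in this suite.
    new_user = admin.new_user(username, password, False)
    yield new_user
    new_user.delete()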
"resource": ["testUserPermissions"], + }, + ] + + def test_user_exists_in_user_list(self, admin, user): + all_users = admin.users() + assert user in all_users + + def test_non_existent_user_should_not_get_a_handle(self, admin): + with pytest.raises(exceptions.StardogException, match="User .* does not exist"): + admin.user("not a real user") + + +class TestRoles(TestStardog): + def test_role_creation(self, admin, role): + assert len(admin.roles()) == len(default_roles) + 1 + + def test_role_permissions_empty(self, role): + assert role.permissions() == [] + + def test_role_add_and_remove_permission(self, role): + role.add_permission("WRITE", "*", "*") + assert role.permissions() == [ + {"action": "WRITE", "resource_type": "*", "resource": ["*"]} + ] + + role.remove_permission("WRITE", "*", "*") + assert role.permissions() == [] + + def test_role_exists_in_role_list(self, admin, role): + all_roles = admin.roles() + assert role in all_roles + + def test_non_existing_role_should_not_get_a_handle(self, admin): + with pytest.raises(exceptions.StardogException, match="Role .* does not exist"): + admin.role("not a real role") + + +class TestDatabase(TestStardog): + def _options(): + return {"search.enabled": True, "spatial.enabled": True} + + def _bld() -> list: + contents = [ + content.Raw( + " .", + content_types.TURTLE, + name="bulkload.ttl", + ), + (content.File("data/example.ttl.zip"), "urn:context"), + content.URL( + "https://www.w3.org/2000/10/rdf-tests/" + "RDF-Model-Syntax_1.0/ms_4.1_1.rdf" + ), + ] + return contents + + @pytest.mark.dbname("pystardog-db-name") + def test_create_db(self, db): + assert db.name == "pystardog-db-name" + + def test_default_db_properties(self, db): + assert db.get_options("search.enabled", "spatial.enabled") == { + "search.enabled": False, + "spatial.enabled": False, + } + + @pytest.mark.options(_options()) + def test_new_database_with_properties(self, db): + assert db.get_options("search.enabled", "spatial.enabled") == { + "search.enabled": True, + "spatial.enabled": True, + } + + def test_online_offline(self, db): + # change options + assert db.get_options("database.online") == {"database.online": True} + db.offline() + assert db.get_options("database.online") == {"database.online": False} + db.online() + assert db.get_options("database.online") == {"database.online": True} + + def test_get_all_options(self, db): + options = db.get_all_options() + assert len(options.keys()) > 150 + + def test_optimized(self, db): + db.optimize() + # for now this is the best we can do + assert True + + def test_verity(self, db): + db.verify() + # for now this is the best we can do + assert True + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + # use of BULKLOAD, need to choose which one to leave as final. + @pytest.mark.contents(_bld()) + @pytest.mark.kwargs({"copy_to_server": True}) + def test_bulkload(self, db, conn): + assert self.expected_count(conn, 6) + assert self.expected_count(conn, 1, ng="") + + def test_database_exists_in_databases_list(self, db, admin): + all_databases = admin.databases() + assert db in all_databases + + def test_non_existent_db_should_not_get_a_handle(self, admin): + with pytest.raises(StardogException, match="does not exist"): + admin.database("not_real_db") + + +# A namespace fixture creates a database, and assigns the namespace to that database. +# both gets cleaned up as part of the fixture. 
+# A namespace fixture creates a database and assigns the namespace to that database;
+# both get cleaned up as part of the fixture.
+class TestNamespaces(TestStardog):
+    def test_add_and_delete_namespaces(self, db):
+        # number of namespaces is always 6 by default for any new database
+        assert len(db.namespaces()) == 6
+
+        db.add_namespace("testns", "my:test:IRI")
+        assert len(db.namespaces()) == 7
+
+        # tests a failure while adding an existing namespace
+        with pytest.raises(
+            Exception, match="Namespace already exists for this database"
+        ):
+            db.add_namespace("stardog", "someiri")
+
+        db.remove_namespace("testns")
+        assert len(db.namespaces()) == 6
+
+        # tests a failure while removing a non-existent namespace
+        with pytest.raises(
+            Exception, match="Namespace does not exists for this database"
+        ):
+            db.remove_namespace("non-existent-ns")
+
+        # tests inserting a pair of namespaces where one prefix is a substring of the other
+        db.add_namespace("testnspace", "my:test:IRI")
+        db.add_namespace("testns", "my:test:IRI")
+
+        assert len(db.namespaces()) == 8
+
+        # tests removal of the correct namespace, even if a similarly named namespace exists
+        db.remove_namespace("testns")
+        db.remove_namespace("testnspace")
+
+        assert len(db.namespaces()) == 6
+
+    def test_import_namespaces(self, db):
+        # we want to test more than one file format
+        namespaces_ttl = content.File("test/data/namespaces.ttl")
+        namespaces_rdf = content.File("test/data/namespaces.xml")
+
+        db_default_namespaces = db.namespaces()
+        ns_default_count = len(db_default_namespaces)
+
+        # number of namespaces is always 6 by default for any new database
+        # https://docs.stardog.com/operating-stardog/database-administration/managing-databases#namespaces
+        assert ns_default_count == 6
+
+        # imports 4 namespaces
+        db.import_namespaces(namespaces_ttl)
+        ttl_ns_count = len(db.namespaces())
+        assert ns_default_count + 4 == ttl_ns_count
+
+        # imports 2 namespaces
+        db.import_namespaces(namespaces_rdf)
+        rdf_ns_count = len(db.namespaces())
+        assert ttl_ns_count + 2 == rdf_ns_count
+
+
+class TestDataSource(TestStardog):
+    @pytest.mark.ds_name("pystardog-test-datasource")
+    def test_datasource_creation(self, admin, datasource):
+        ds = admin.datasource("pystardog-test-datasource")
+        assert ds.name == "pystardog-test-datasource"
+
+    def test_datasource_exists_in_datasource_list(self, admin, datasource):
+        all_datasources = admin.datasources()
+        assert datasource in all_datasources
+
+    def test_non_existent_datasource_should_not_get_a_handle(self, admin):
+        with pytest.raises(
+            exceptions.StardogException, match="There is no data source with name"
+        ):
+            admin.datasource("not a real data source")
+
+
+class TestLoadData(TestStardog):
+    def setup_class(self):
+        self.run_vg_test = True
+
+        if TestStardog.is_local:
+            # Let's check if we have the sqlite driver
+            libpath = os.environ.get("STARDOG_EXT", None)
+
+            driver_found = False
+            if libpath:
+                d = re.compile("sqlite-jdbc")
+                for file in os.listdir(libpath):
+                    if d.match(file):
+                        driver_found = True
+
+            if driver_found:
+                if not os.path.exists("/tmp/music.db"):
+                    import sqlite3
+                    from sqlite3 import Error
+
+                    conn = None
+                    try:
+                        conn = sqlite3.connect("/tmp/music.db")
+                        with open("data/music_schema.sql") as f:
+                            conn.executescript(f.read())
+                        with open("data/beatles.sql") as f:
+                            conn.executescript(f.read())
+                    except Error as e:
+                        self.run_vg_test = False
+                    except FileNotFoundError as e:
+                        self.run_vg_test = False
+                    finally:
+                        if conn:
+                            conn.close()
+            else:
+                self.run_vg_test = False
+            self.msg_vg_test = """
+No sqlite driver detected, all virtual graph tests will be disabled
+Download driver from
https://search.maven.org/artifact/org.xerial/sqlite-jdbc +And install in directory pointed to by STARDOG_EXT and restart server +""" + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_data_add_ttl_from_file(self, db, conn): + conn.begin() + conn.add(content.File("data/example.ttl")) + conn.commit() + assert self.expected_count(conn, 1) + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_data_add_ttl_from_file_server_side(self, db, conn): + conn.begin() + conn.add(content.File("/tmp/example-remote.ttl"), server_side=True) + conn.commit() + assert self.expected_count(conn, 1) + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_data_add_ttl_from_content(self, db, conn): + with open("data/example.ttl") as f: + conn.begin() + conn.add(content.Raw(f.read(), name="example.ttl")) + conn.commit() + assert self.expected_count(conn, 1) + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_data_add_ttl_from_file_ns(self, db, conn): + conn.begin() + conn.add(content.File("data/example.ttl"), graph_uri=self.ng) + conn.commit() + assert self.expected_count(conn, 1, ng=f"<{self.ng}>") + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_import_csv_from_file(self, admin, db, conn): + admin.import_file( + db.name, + content.MappingFile("data/test_import_delimited.sms"), + content.ImportFile("data/test_import_delimited.csv"), + ) + assert self.expected_count(conn, 145961) + + # nested withs can be merged into 1 + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_import_csv_from_content(self, admin, db, conn): + with open("data/test_import_delimited.csv") as csv: + with open("data/test_import_delimited.sms") as sms: + admin.import_file( + db.name, + content.MappingRaw(sms.read()), + content.ImportRaw(csv.read(), name="data.csv"), + ) + assert self.expected_count(conn, 145961) + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_import_csv_from_file_ns(self, admin, db, conn): + admin.import_file( + db.name, + content.MappingFile("data/test_import_delimited.sms"), + content.ImportFile("data/test_import_delimited.csv"), + None, + self.ng, + ) + assert self.expected_count(conn, 145961, ng=f"<{self.ng}>") + + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_import_tsv_from_file(self, admin, db, conn): + admin.import_file( + db.name, + content.MappingFile("data/test_import_delimited.sms"), + content.ImportFile("data/test_import_delimited.csv"), + ) + assert self.expected_count(conn, 145961) + + # 2 nested with can be merged + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def test_import_tsv_from_content(self, admin, db, conn): + with open("data/test_import_delimited.csv") as tsv: + with open("data/test_import_delimited.sms") as sms: + admin.import_file( + db.name, + content.MappingRaw(sms.read()), + content.ImportRaw(tsv.read(), name="data.csv"), + ) + assert self.expected_count(conn, 145961) + + # put the tsv data in a namedgraph + @pytest.mark.dbname("pystardog-test-database") + @pytest.mark.conn_dbname("pystardog-test-database") + def 
test_import_tsv_from_file_ns(self, admin, db, conn):
+        admin.import_file(
+            db.name,
+            content.MappingFile("data/test_import_delimited.sms"),
+            content.ImportFile("data/test_import_delimited.tsv"),
+            None,
+            self.ng,
+        )
+        assert self.expected_count(conn, 145961, ng=f"<{self.ng}>")
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_import_json_from_file(self, admin, db, conn):
+        admin.import_file(
+            db.name,
+            content.MappingFile("data/test_import_json.sms"),
+            content.ImportFile("data/test_import.json"),
+        )
+        assert self.expected_count(conn, 223)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_import_json_from_contents(self, admin, db, conn):
+        with open("data/test_import.json") as json:
+            with open("data/test_import_json.sms") as sms:
+                admin.import_file(
+                    db.name,
+                    content.MappingRaw(sms.read()),
+                    content.ImportRaw(json.read(), name="data.json"),
+                )
+        assert self.expected_count(conn, 223)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_import_json_from_file_ns(self, admin, db, conn):
+        admin.import_file(
+            db.name,
+            content.MappingFile("data/test_import_json.sms"),
+            content.ImportFile("data/test_import.json"),
+            None,
+            "http://example.org/graph",
+        )
+        assert self.expected_count(conn, 223, ng=f"<{self.ng}>")
+
+    ## MATERIALIZE AND VG LOAD SHOULD BE PART OF VG TEST
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_materialize_graph_from_file(self, admin, db, conn):
+        if self.run_vg_test:
+            admin.materialize_virtual_graph(
+                db.name,
+                content.MappingFile("data/music_mappings.ttl", "STARDOG"),
+                None,
+                self.music_options,
+            )
+            assert self.expected_count(conn, 37)
+        else:
+            pytest.skip(self.msg_vg_test)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_materialize_graph_from_file_with_ds(self, admin, db, conn, datasource):
+
+        if self.run_vg_test:
+            admin.materialize_virtual_graph(
+                db.name,
+                content.MappingFile("data/music_mappings.ttl", "STARDOG"),
+                datasource.name,
+            )
+            assert self.expected_count(conn, 37)
+        else:
+            pytest.skip(self.msg_vg_test)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_materialize_graph_from_content(self, admin, db, conn):
+        if self.run_vg_test:
+            with open("data/music_mappings.ttl") as f:
+                admin.materialize_virtual_graph(
+                    db.name,
+                    content.MappingRaw(f.read(), "STARDOG"),
+                    None,
+                    self.music_options,
+                )
+            assert self.expected_count(conn, 37)
+        else:
+            pytest.skip(self.msg_vg_test)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_materialize_graph_from_file_in_ng(self, admin, db, conn):
+        if self.run_vg_test:
+            admin.materialize_virtual_graph(
+                db.name,
+                content.MappingFile("data/music_mappings.ttl", "STARDOG"),
+                None,
+                self.music_options,
+                self.ng,
+            )
+            assert self.expected_count(conn, 37, ng=f"<{self.ng}>")
+        else:
+            pytest.skip(self.msg_vg_test)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_materialize_graph_from_file_with_ds_in_ng(self, admin, db, datasource, conn):
+        if self.run_vg_test:
+            admin.materialize_virtual_graph(
+                db.name,
+                content.MappingFile("data/music_mappings.ttl", "STARDOG"),
+                datasource.name,
+                None,
+                self.ng,
+            )
+            assert self.expected_count(conn, 37, ng=f"<{self.ng}>")
+        else:
+            pytest.skip(self.msg_vg_test)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_materialize_graph_from_content_with_ng(self, admin, db, conn):
+        if self.run_vg_test:
+            with open("data/music_mappings.ttl") as f:
+                admin.materialize_virtual_graph(
+                    db.name,
+                    content.MappingRaw(f.read(), "STARDOG"),
+                    None,
+                    self.music_options,
+                    self.ng,
+                )
+            assert self.expected_count(conn, 37, ng=f"<{self.ng}>")
+        else:
+            pytest.skip(self.msg_vg_test)
+
+    @pytest.mark.dbname("pystardog-test-database")
+    @pytest.mark.conn_dbname("pystardog-test-database")
+    def test_import_db_deprecated(self, admin, db, conn):
+        if self.run_vg_test:
+            admin.import_virtual_graph(
+                db.name,
+                content.File("data/music_mappings.ttl"),
+                self.ng,
+                False,
+                self.music_options,
+            )
+            assert self.expected_count(conn, 37, ng=f"<{self.ng}>")
+        else:
+            pytest.skip(self.msg_vg_test)
+
+
+class TestVirtualGraph(TestStardog):
+    # Also available as a fixture.
+    def _music_options():
+        options = {
+            "jdbc.driver": "com.mysql.jdbc.Driver",
+            "jdbc.username": "user",
+            "jdbc.password": "pass",
+            "mappings.syntax": "STARDOG",
+            "jdbc.url": "jdbc:mysql://pystardog_mysql_music/music?useSSL=false",
+        }
+        return options
+
+    def _video_options():
+        properties = {
+            "jdbc.driver": "com.mysql.jdbc.Driver",
+            "jdbc.username": "user",
+            "jdbc.password": "pass",
+            "mappings.syntax": "STARDOG",
+            "jdbc.url": "jdbc:mysql://pystardog_mysql_videos/videos?useSSL=false",
+        }
+        return properties
+
+    def _bad_options():
+        bad_options = {
+            "jdbc.driver": "com.mysql.jdbc.Driver",
+            "jdbc.username": "non-existent",
+            "jdbc.password": "non-existent",
+            "jdbc.url": "jdbc:mysql://non-existent",
+        }
+        return bad_options
+
+    def _simple_options():
+        return {"mappings.syntax": "SMS2"}
+
+    @pytest.mark.use_music_datasource(True)
+    def test_vg_update(self, virtual_graph):
+        assert "mappings.syntax" not in virtual_graph.options()
+        virtual_graph.update(
+            "new_name_vg", mappings="", options={"mappings.syntax": "SMS2"}
+        )
+        assert "mappings.syntax" in virtual_graph.options()
+        assert virtual_graph.name == "new_name_vg"
+
+    @pytest.mark.use_music_datasource(True)
+    def test_vg_no_options(self, virtual_graph):
+        # namespace is the "default" option, so passing no options will still generate a namespace option
+        assert len(virtual_graph.options()) == 1
+
+    @pytest.mark.use_music_datasource(True)
+    @pytest.mark.database_name("some-database")
+    def test_associate_vg_with_db(self, virtual_graph):
+        assert "some-database" == virtual_graph.info()["database"]
+
+    @pytest.mark.virtual_graph_options(_video_options())
+    def test_create_vg_with_data_source_specified_in_options(self, virtual_graph):
+        # vg = admin.new_virtual_graph("test_vg", mappings="", options=music_options)
+        # namespace is a default option, so final option count will be 1 + number of options added
+        assert len(virtual_graph.options()) > 1
+
+    # Can't remember what this one is supposed to verify.
+    @pytest.mark.use_music_datasource(True)
+    @pytest.mark.virtual_graph_options(_music_options())
+    def test_vg_mappings(self, virtual_graph):
+        # default is STARDOG
+        assert (
+            "@prefix : <http://api.stardog.com/>."
+            == virtual_graph.mappings_string().decode("utf-8")[0:37]
+        )
+
+        # We only test the first line of the response, as the rest contains randomly generated
+        # strings each time a mapping is generated, so we can't know beforehand what to compare it to.
+        # We assume that if the first line is what we expect, the rest of the mappings were retrieved successfully as well.
+        assert (
+            "@prefix : <http://api.stardog.com/>."
+            == virtual_graph.mappings_string("R2RML").decode("utf-8")[0:37]
+        )
+
+    @pytest.mark.use_music_datasource(True)
+    @pytest.mark.mappings(content.File("test/data/music_mappings.ttl"))
+    def test_create_vg_with_custom_mappings(self, virtual_graph):
+        assert (
+            "PREFIX : <http://stardog.com/tutorial/>"
+            == virtual_graph.mappings_string().decode("utf-8")[0:39]
+        )
+
+    # this might be deprecated later, but we test it until then.
+    @pytest.mark.use_music_datasource(True)
+    def test_mappings_old(self, virtual_graph):
+        assert (
+            "@prefix : <http://api.stardog.com/>."
+            == virtual_graph.mappings().decode("utf-8")[0:37]
+        )
+
+    @pytest.mark.virtual_graph_options(_bad_options())
+    @pytest.mark.use_music_datasource(True)
+    @pytest.mark.dbname("test-vg")
+    @pytest.mark.conn_dbname("test-vg")
+    @pytest.mark.vgname("test-vg")
+    def test_datasource_preferred_over_options_for_vg_creation(
+        self, db, virtual_graph, conn
+    ):
+        res = conn.select("SELECT * {GRAPH <virtual://test-vg> { ?s ?p ?o }} LIMIT 1")
+        assert (
+            "http://api.stardog.com/Artist/id=1"
+            == res["results"]["bindings"][0]["s"]["value"]
+        )
+
+    @pytest.mark.virtual_graph_options(_simple_options())
+    @pytest.mark.use_music_datasource(True)
+    def test_extra_options_should_be_passed_to_vg(self, virtual_graph):
+        assert "mappings.syntax" in virtual_graph.options()
+
+    def test_should_fail_when_no_datasource_is_passed(self, admin):
+        with pytest.raises(
+            exceptions.StardogException, match="Unable to determine data source type"
+        ):
+            admin.new_virtual_graph("vg", content.File("test/data/r2rml.ttl"))
+
+    def test_should_fail_if_vg_does_not_exist(self, admin):
+        with pytest.raises(
+            exceptions.StardogException, match="Virtual Graph non-existent Not Found!"
+        ):
+            vg = admin.virtual_graph("non-existent")
+            vg.available()
+
+    @pytest.mark.skip(
+        reason="We need to sort out whether we want users to deal with prefix:// for vg/ds operations"
+    )
+    def test_vg_exists_in_vg_list(self, admin, virtual_graph_music):
+        all_vgs = admin.virtual_graphs()
+        assert virtual_graph_music in all_vgs
+
+    @pytest.mark.skip(reason="Fix me later")
+    # Music options are passed in as a fixture; need to decide whether this will stay a fixture.
+    @pytest.mark.dbname("test-import-db")
+    def test_import_vg(self, admin, db, music_options):
+        graph_name = "test-graph"
+
+        # tests passing mappings
+        admin.import_virtual_graph(
+            "test-import-db",
+            mappings=content.File("test/data/music_mappings.ttl"),
+            named_graph=graph_name,
+            remove_all=True,
+            options=music_options,
+        )
+        # the specified mapping file generates a graph with a total of 37 triples
+        assert 37 == self.count_records(db.name, conn_string)
+
+        # tests passing empty mappings
+        admin.import_virtual_graph(
+            "test-import-db",
+            mappings="",
+            named_graph=graph_name,
+            remove_all=True,
+            options=music_options,
+        )
+        # if no mapping is specified, the music db generates a graph with 79 triples
+        assert 79 == self.count_records(db.name, conn_string)
+
+        # tests remove_all=False; it should return music records + video records.
+        admin.import_virtual_graph(
+            "test-import-db",
+            mappings="",
+            named_graph=graph_name,
+            remove_all=False,
+            options=videos_options,
+        )
+        # if no mapping is specified, the videos db generates a graph with 800 triples; adding the un-mapped music sums up to 879.
+        assert 879 == self.count_records(db.name, conn_string)
+
+
+class TestStoredQueries(TestStardog):
+    def test_query_does_not_exist(self, admin):
+        with pytest.raises(exceptions.StardogException, match="Stored query not found"):
+            admin.stored_query("not a real stored query")
+
+    @pytest.mark.query_name("pystardog-stored-query-test")
+    def test_add_stored_query(self, stored_query):
+        assert stored_query.name == "pystardog-stored-query-test"
+
+    def test_update_stored_query(self, stored_query):
+        # update a stored query
+        assert stored_query.description is None
+        stored_query.update(description="get all the triples")
+        assert stored_query.description == "get all the triples"
+
+    def test_clear_all_stored_queries(self, admin):
+        # We don't use the fixture because we want to handle the cleanup ourselves here.
+        admin.new_stored_query("everything", "select * where { ?s ?p ?o . }")
+        admin.new_stored_query("everything2", "select * where { ?s ?p ?o . }")
+        assert len(admin.stored_queries()) == 2
+        admin.clear_stored_queries()
+        assert len(admin.stored_queries()) == 0
+
+    def test_query_in_query_list(self, admin, stored_query):
+        assert stored_query.name in [sq.name for sq in admin.stored_queries()]
+
+    # This assertion was not in the original test and for some reason doesn't work;
+    # need to confirm why some of these membership checks work and some don't (see the note below).
+    # assert stored_query.name in admin.stored_queries()
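A plausible explanation for the commented-out membership check (an assumption; the handle classes are not shown here): `in` compares elements with `==`, which falls back to object identity unless the class defines `__eq__`. Since `user in admin.users()` and `role in admin.roles()` pass elsewhere in this suite, those handles presumably compare by value; if `StoredQuery` does not, a sketch of the missing pieces would be:

class StoredQuery:
    def __eq__(self, other):
        # Two handles denote the same stored query when their names match.
        return isinstance(other, StoredQuery) and self.query_name == other.query_name

    def __hash__(self):
        # Keep the handle usable in sets and dicts after defining __eq__.
        return hash(self.query_name)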
diff --git a/test/test_cache.py b/test/test_cache.py
new file mode 100644
index 0000000..90c0f53
--- /dev/null
+++ b/test/test_cache.py
@@ -0,0 +1,217 @@
+import pytest
+
+import stardog.admin
+
+# The wait helpers are assumed to live in test/utils.py after the split,
+# like the helpers imported by test_standby.py.
+from .utils import wait_for_creating_cache_target, wait_for_cleaning_cache_target
+
+
+def test_cache_targets(admin, cache_target_info):
+
+    cache_target_name = cache_target_info["target_name"]
+    cache_target_hostname = cache_target_info["hostname"]
+    cache_target_port = cache_target_info["port"]
+    cache_target_username = cache_target_info["username"]
+    cache_target_password = cache_target_info["password"]
+
+    cache_targets = admin.cache_targets()
+    assert len(cache_targets) == 0
+
+    cache_target = admin.new_cache_target(
+        cache_target_name,
+        cache_target_hostname,
+        cache_target_port,
+        cache_target_username,
+        cache_target_password,
+    )
+    wait_for_creating_cache_target(admin, cache_target_name)
+    cache_targets = admin.cache_targets()
+    assert len(cache_targets) == 1
+
+    assert cache_target.info()["name"] == cache_target_name
+    assert cache_target.info()["port"] == cache_target_port
+    assert cache_target.info()["hostname"] == cache_target_hostname
+    assert cache_target.info()["username"] == cache_target_username
+
+    # test remove()
+    cache_target.remove()
+    wait_for_cleaning_cache_target(admin, cache_target.name)
+    cache_targets = admin.cache_targets()
+    assert len(cache_targets) == 0
+
+    # tests orphan
+    cache_target = admin.new_cache_target(
+        cache_target_name,
+        cache_target_hostname,
+        cache_target_port,
+        cache_target_username,
+        cache_target_password,
+    )
+    wait_for_creating_cache_target(admin, cache_target_name)
+    cache_target.orphan()
+    wait_for_cleaning_cache_target(admin, cache_target.name)
+    cache_targets = admin.cache_targets()
+    assert len(cache_targets) == 0
+    # Recall that removing a cache target is an idempotent operation and will always (unless it fails)
+    # report that the cache target was removed, even if it didn't exist in the first place.
+    # Removing an orphaned cache target will not delete the data, because the target is already orphaned.
+    # We need to recreate the orphaned cache target (using use_existing_db) in order to fully delete its data.
+    cache_target = admin.new_cache_target(
+        cache_target_name,
+        cache_target_hostname,
+        cache_target_port,
+        cache_target_username,
+        cache_target_password,
+        use_existing_db=True,
+    )
+    wait_for_creating_cache_target(admin, cache_target_name)
+    cache_target.remove()
+
+
+def test_cache_ng_datasets(admin, bulkload_content, cache_target_info):
+
+    cache_target_name = cache_target_info["target_name"]
+    cache_target_hostname = cache_target_info["hostname"]
+    cache_target_port = cache_target_info["port"]
+    cache_target_username = cache_target_info["username"]
+    cache_target_password = cache_target_info["password"]
+
+    cache_target = admin.new_cache_target(
+        cache_target_name,
+        cache_target_hostname,
+        cache_target_port,
+        cache_target_username,
+        cache_target_password,
+    )
+    wait_for_creating_cache_target(admin, cache_target_name)
+
+    bl = admin.new_database("bulkload", {}, *bulkload_content, copy_to_server=True)
+
+    assert len(admin.cached_graphs()) == 0
+    cached_graph_name = "cache://cached-ng"
+    cached_graph = admin.new_cached_graph(
+        cached_graph_name, cache_target.name, "urn:context", bl.name
+    )
+
+    assert len(admin.cached_graphs()) == 1
+
+    cached_graph_status = cached_graph.status()
+    assert cached_graph_status[0]["name"] == cached_graph_name
+    assert cached_graph_status[0]["target"] == cache_target.name
+    cached_graph.refresh()
+    
cached_graph.drop() + assert len(admin.cached_graphs()) == 0 + + +def test_cache_vg_datasets(admin, music_options, cache_target_info): + + cache_target_name = cache_target_info["target_name"] + cache_target_hostname = cache_target_info["hostname"] + cache_target_port = cache_target_info["port"] + cache_target_username = cache_target_info["username"] + cache_target_password = cache_target_info["password"] + + # creating a VG using empty mappings, and specifying a datasource + ds = admin.new_datasource("music", music_options) + vg = admin.new_virtual_graph("test_vg", mappings="", datasource=ds.name) + + cache_target = admin.new_cache_target( + cache_target_name, + cache_target_hostname, + cache_target_port, + cache_target_username, + cache_target_password, + ) + wait_for_creating_cache_target(admin, cache_target_name) + # We need to register the VG into the cache target as well. + + conn_string_cache = { + "endpoint": "http://" + cache_target_hostname + ":5820", + "username": "admin", + "password": "admin", + } + + with stardog.admin.Admin(**conn_string_cache) as admin_cache_target: + ds_cached = admin_cache_target.new_datasource("music", music_options) + vg_cached = admin_cache_target.new_virtual_graph( + "test_vg", mappings="", datasource=ds.name + ) + + assert len(admin.cached_graphs()) == 0 + + cached_graph_name = "cache://cached-vg" + cached_graph = admin.new_cached_graph( + cached_graph_name, cache_target.name, "virtual://" + vg.name + ) + + assert len(admin.cached_graphs()) == 1 + + cached_graph_status = cached_graph.status() + assert cached_graph_status[0]["name"] == cached_graph_name + assert cached_graph_status[0]["target"] == cache_target.name + cached_graph.refresh() + cached_graph.drop() + assert len(admin.cached_graphs()) == 0 + + cache_target.remove() + wait_for_cleaning_cache_target(admin, cache_target.name) + + vg.delete() + ds.delete() + + +@pytest.mark.skip( + reason="Caching queries is no longer supported. 
We are skipping we but should make sure it still works for older SD versions" +) +def test_cache_query_datasets(admin, bulkload_content, cache_target_info): + + cache_target_name = cache_target_info["target_name"] + cache_target_hostname = cache_target_info["hostname"] + cache_target_port = cache_target_info["port"] + cache_target_username = cache_target_info["username"] + cache_target_password = cache_target_info["password"] + + cache_target = admin.new_cache_target( + cache_target_name, + cache_target_hostname, + cache_target_port, + cache_target_username, + cache_target_password, + ) + wait_for_creating_cache_target(admin, cache_target_name) + + bl = admin.new_database("bulkload", {}, *bulkload_content, copy_to_server=True) + + assert len(admin.cached_queries()) == 0 + + cached_query_name = "cache://my_new_query_cached" + + cached_query = admin.new_cached_query( + cached_query_name, cache_target.name, "SELECT * { ?s ?p ?o }", bl.name + ) + # wait_for_creating_cache_dataset(admin, cached_query_name, 'query') + + assert len(admin.cached_queries()) == 1 + + cached_query_status = cached_query.status() + assert cached_query_status[0]["name"] == cached_query_name + assert cached_query_status[0]["target"] == cache_target.name + cached_query.refresh() + cached_query.drop() + assert len(admin.cached_queries()) == 0 + + cache_target.remove() + wait_for_cleaning_cache_target(admin, cache_target.name) + + +def test_cache_target_in_cache_target_list(admin, cache_target_info): + cache_target_name = cache_target_info["target_name"] + cache_target_hostname = cache_target_info["hostname"] + cache_target_port = cache_target_info["port"] + cache_target_username = cache_target_info["username"] + cache_target_password = cache_target_info["password"] + + cache_targets = admin.cache_targets() + assert len(cache_targets) == 0 + + cache_target = admin.new_cache_target( + cache_target_name, + cache_target_hostname, + cache_target_port, + cache_target_username, + cache_target_password, + ) + wait_for_creating_cache_target(admin, cache_target_name) + assert cache_target in admin.cache_targets() diff --git a/test/test_cluster.py b/test/test_cluster.py new file mode 100644 index 0000000..04f1de5 --- /dev/null +++ b/test/test_cluster.py @@ -0,0 +1,25 @@ +import pytest +from stardog import admin as sd_admin +from stardog import exceptions + + +@pytest.mark.skip( + reason="Implementation is not well documented, https://stardog.atlassian.net/browse/PLAT-2946" +) +def test_cluster_diagnostic_report(admin): + admin.cluster_diagnostic_reports() + + +def test_cluster_readonly(admin): + admin.cluster_start_readonly() + with pytest.raises(exceptions.StardogException, match="The cluster is read only"): + admin.new_database("fail_db") + admin.cluster_stop_readonly() + + +def test_coordinator_check(admin, conn_string): + coordinator_info = admin.cluster_info()["coordinator"] + coordinator_conn_string = conn_string + coordinator_conn_string["endpoint"] = "http://" + coordinator_info + with sd_admin.Admin(**coordinator_conn_string) as admin_coordinator_check: + assert admin_coordinator_check.cluster_coordinator_check() diff --git a/test/test_connection.py b/test/test_connection.py index dad2e96..c2ae3c9 100644 --- a/test/test_connection.py +++ b/test/test_connection.py @@ -1,10 +1,6 @@ import pytest -import stardog.admin -import stardog.connection as connection -import stardog.content as content -import stardog.content_types as content_types -import stardog.exceptions as exceptions +from stardog import admin, connection, content, 
content_types, exceptions @pytest.fixture() @@ -25,9 +21,9 @@ def conn(conn_string, proxies, ssl_verify): @pytest.fixture(autouse="True") def db(conn_string): - with stardog.admin.Admin(**conn_string) as admin: - db = admin.new_database("newtest", {"search.enabled": True}) - yield admin + with admin.Admin(**conn_string) as sd_admin: + db = sd_admin.new_database("newtest", {"search.enabled": True}) + yield sd_admin db.drop() @@ -359,8 +355,8 @@ def test_icv(conn): def test_graphql(conn_string): - with stardog.admin.Admin(**conn_string) as admin: - db = admin.new_database( + with admin.Admin(**conn_string) as sd_admin: + db = sd_admin.new_database( "graphql", {}, content.File("test/data/starwars.ttl"), copy_to_server=True ) diff --git a/test/test_integration.py b/test/test_integration.py deleted file mode 100644 index bea1a15..0000000 --- a/test/test_integration.py +++ /dev/null @@ -1,620 +0,0 @@ -import copy -import os -import re -import sys -from enum import Enum - -import pytest -import stardog -from stardog import admin, connection, content, content_types - -############################################################### -# -# These test can be run against a cluster or standalone server -# -############################################################### -from stardog.exceptions import StardogException - - -class Resource(Enum): - DB = "db_sd_int_test" - DS = "ds_sd_int_test" - VG = "vg_sd_int_test" - NG = "http://example.org/graph" - - -class TestStardog: - is_local = ( - True - if "localhost" in os.environ.get("STARDOG_ENDPOINT", "http://localhost:5820") - and not os.path.exists("/.dockerenv") - else False - ) - - def setup_method(self, test_method): - """ - Before each test a fresh stardog admin and credential object will be provided, just in case it got corrupted. - - @rtype: None - - """ - - conn = { - "endpoint": os.environ.get("STARDOG_ENDPOINT", "http://localhost:5820"), - "username": os.environ.get("STARDOG_USERNAME", "admin"), - "password": os.environ.get("STARDOG_PASSWORD", "admin"), - } - self.conn = conn - self.admin = stardog.Admin(**conn) - - if not os.path.isdir("data") and not os.path.islink("data"): - os.symlink("test/data", "data") - - def teardown_method(self, test_method): - """ - After each test this will destroy all resources on the instance. 
- - @rtype: None - """ - - dbs = self.admin.databases() - - for db in dbs: - try: - db.drop() - except StardogException as e: - if e.stardog_code != "0D0DU2": - raise e - pass - - vgs = self.admin.virtual_graphs() - - for vg in vgs: - try: - vg.delete() - except StardogException as e: - if e.stardog_code != "0D0DU2": - raise e - pass - - dss = self.admin.datasources() - - for ds in dss: - try: - ds.delete() - except StardogException as e: - if e.stardog_code != "0D0DU2": - raise e - pass - - def expected_count( - self, expected=1, db: str = None, ng: str = "stardog:context:default" - ) -> bool: - db = db if db else self.db_name - - with connection.Connection(db, **self.conn) as c: - x = f"select * where {{ graph {ng} {{?s ?p ?o}} }}" - q = c.select(f"select * where {{ graph {ng} {{?s ?p ?o}} }}") - return len(q["results"]["bindings"]) == expected - - def connection(self, db: str = None): - db = db if db else self.db_name - - return connection.Connection(db, **self.conn) - - @property - def ng(self) -> str: - """ - This method return the default named-graph string - - @return: str - """ - return Resource.NG.value - - ################################################################################################################ - # - # Database helpers - # - ################################################################################################################ - - @property - def db(self) -> stardog.admin.Database: - """ - This method will return a new default database object for the test. If it exists it will be destroyed - - @rtype: stardog.admin.Database - """ - # db = self.admin.database(self.db_name) - - try: - # db.drop() - db = self.admin.database(self.db_name) - except StardogException as e: - if e.stardog_code != "0D0DU2": - raise e - pass - - return self.admin.new_database(self.db_name) - - @property - def db_name(self) -> str: - """ - This method will return the default database name - - @rtype: str - """ - return Resource.DB.value - - @property - def bulk_load_content(self) -> list: - contents = [ - content.Raw( - " .", - content_types.TURTLE, - name="bulkload.ttl", - ), - (content.File("data/example.ttl.zip"), "urn:context"), - content.URL( - "https://www.w3.org/2000/10/rdf-tests/" - "RDF-Model-Syntax_1.0/ms_4.1_1.rdf" - ), - ] - return contents - - ################################################################################################################ - # - # Datasource & VirtualGraph helpers - # - ################################################################################################################ - @property - def ds(self) -> stardog.admin.DataSource: - """ - This method will return a new default datasource object for the test. If it exists it will be destroyed and recreated. - - @rtype: stardog.admin.DataSource - """ - - try: - self.admin.datasource(self.ds_name) - except StardogException as e: - if e.http_code != 400: - raise e - pass - - return self.admin.new_datasource(self.ds_name, self.music_options) - - @property - def ds_name(self): - """ - This method will return the default data-source name - - @rtype: str - """ - return Resource.DS.value - - @property - def vg(self) -> stardog.admin.VirtualGraph: - """ - This method will return a new default virtual_graph object for the test. If it exists it will be destroyed and recreated. 
- - @rtype: stardog.admin.VirtualGraph - """ - - try: - self.admin.virtual_graph(self.db_name) - except StardogException as e: - if e.stardog_code != "0D0DU2": - raise e - pass - - return self.admin.new_datasource(self.ds_name) - - @property - def vg_name(self): - """ - This method will return the default virtual_graph name - - @rtype: str - """ - return Resource.DS.value - - @property - def music_options(self): - if TestStardog.is_local: - return { - "jdbc.url": "jdbc:sqlite:/tmp/music.db", - "jdbc.username": "whatever", - "jdbc.password": "whatever", - "jdbc.driver": "org.sqlite.JDBC", - "sql.default.schema": "main", - "sql.defaults": "main", - "sql.skip.validation": "true", - "sql.dialect": "POSTGRESQL", - } - else: - return { - "jdbc.driver": "com.mysql.jdbc.Driver", - "jdbc.username": "user", - "jdbc.password": "pass", - "mappings.syntax": "STARDOG", - "jdbc.url": "jdbc:mysql://pystardog_mysql_music/music?useSSL=false", - } - - -class TestDatabase(TestStardog): - def test_database_creation(self): - # create database, validate it created and drop it. - - db = self.admin.new_database(self.db_name) - assert len(self.admin.databases()) == 1 - assert db.name == self.db_name - - # check that the default are used - assert db.get_options("search.enabled", "spatial.enabled") == { - "search.enabled": False, - "spatial.enabled": False, - } - - db = self.admin.database(self.db_name) - assert db.name == self.db_name - - db.drop() - assert len(self.admin.databases()) == 0 - - def test_new_with_properties(self): - db = self.admin.new_database( - self.db_name, {"search.enabled": True, "spatial.enabled": True} - ) - assert db.get_options("search.enabled", "spatial.enabled") == { - "search.enabled": True, - "spatial.enabled": True, - } - - def test_online_offline(self): - db = self.db - - # change options - assert db.get_options("database.online") == {"database.online": True} - db.offline() - assert db.get_options("database.online") == {"database.online": False} - db.online() - assert db.get_options("database.online") == {"database.online": True} - - def test_get_all_options(self): - db = self.db - - options = db.get_all_options() - - assert len(options.keys()) > 150 - - def test_optimized(self): - db = self.db - db.optimize() - - # for now this is the best we can do - assert True - - def test_verity(self): - db = self.db - db.verify() - - # for now this is the best we can do - assert True - - def test_bulkload(self): - self.admin.new_database( - self.db_name, {}, *self.bulk_load_content, copy_to_server=True - ) - assert self.expected_count(6) - assert self.expected_count(1, ng="") - - def test_non_existent_db(self): - # test non-existent db - with pytest.raises(StardogException, match="does not exist"): - self.admin.database("not_real_db") - - -class TestDataSource(TestStardog): - def test_datasource_creation(self): - # create datasource, validate it created, and delete it - ds = self.admin.new_datasource(self.ds_name, self.music_options) - - assert len(self.admin.datasources()) == 1 - assert ds.name == self.ds_name - - ds = self.admin.datasource(self.ds_name) - assert ds.name == self.ds_name - - ds.delete() - assert len(self.admin.datasources()) == 0 - - -class TestLoadData(TestStardog): - def setup_class(self): - self.run_vg_test = True - - if TestStardog.is_local: - # Let's check if we have the sqlite driver - libpath = os.environ.get("STARDOG_EXT", None) - - driver_found = False - if libpath: - d = re.compile("sqlite-jdbc") - for file in os.listdir(libpath): - if d.match(file): - driver_found = 
True - - if driver_found: - if not os.path.exists("/tmp/music.db"): - import sqlite3 - from sqlite3 import Error - - conn = None - try: - conn = sqlite3.connect("/tmp/music.db") - with open("data/music_schema.sql") as f: - conn.executescript(f.read()) - with open("data/beatles.sql") as f: - conn.executescript(f.read()) - except Error as e: - self.run_vg_test = False - except FileNotFoundError as e: - self.run_vg_test = False - finally: - if conn: - conn.close() - else: - self.run_vg_test = False - self.msg_vg_test = """ -No sqlite driver detected, all virtual graph test will be disabled -Download driver from https://search.maven.org/artifact/org.xerial/sqlite-jdbc -And install in directory pointed to by STARDOG_EXT and restart server -""" - - def test_data_add_ttl_from_file(self): - db = self.db - with self.connection() as c: - c.begin() - c.add(stardog.content.File("data/example.ttl")) - c.commit() - assert self.expected_count(1) - - def test_data_add_ttl_from_file_server_side(self): - db = self.db - with self.connection() as c: - c.begin() - c.add(stardog.content.File("/tmp/example-remote.ttl"), server_side=True) - c.commit() - assert self.expected_count(1) - - def test_data_add_ttl_from_content(self): - db = self.db - with self.connection() as c: - with open("data/example.ttl") as f: - c.begin() - c.add(stardog.content.Raw(f.read(), name="example.ttl")) - c.commit() - assert self.expected_count(1) - - # can we put the ttl data in a namedgraph - def test_data_add_ttl_from_file_ns(self): - db = self.db - with self.connection() as c: - c.begin() - c.add(stardog.content.File("data/example.ttl"), graph_uri=self.ng) - c.commit() - assert self.expected_count(1, ng=f"<{self.ng}>") - - def test_import_csv_from_file(self): - db = self.db - self.admin.import_file( - db.name, - stardog.content.MappingFile("data/test_import_delimited.sms"), - stardog.content.ImportFile("data/test_import_delimited.csv"), - ) - assert self.expected_count(145961) - - def test_import_csv_from_content(self): - db = self.db - with open("data/test_import_delimited.csv") as csv: - with open("data/test_import_delimited.sms") as sms: - self.admin.import_file( - db.name, - stardog.content.MappingRaw(sms.read()), - stardog.content.ImportRaw(csv.read(), name="data.csv"), - ) - assert self.expected_count(145961) - - # put the csv data in a namedgraph - def test_import_csv_from_file_ns(self): - db = self.db - self.admin.import_file( - db.name, - stardog.content.MappingFile("data/test_import_delimited.sms"), - stardog.content.ImportFile("data/test_import_delimited.csv"), - None, - self.ng, - ) - assert self.expected_count(145961, ng=f"<{self.ng}>") - - def test_import_tsv_from_file(self): - db = self.db - self.admin.import_file( - db.name, - stardog.content.MappingFile("data/test_import_delimited.sms"), - stardog.content.ImportFile("data/test_import_delimited.csv"), - ) - assert self.expected_count(145961) - - def test_import_tsv_from_content(self): - db = self.db - with open("data/test_import_delimited.csv") as tsv: - with open("data/test_import_delimited.sms") as sms: - self.admin.import_file( - db.name, - stardog.content.MappingRaw(sms.read()), - stardog.content.ImportRaw(tsv.read(), name="data.csv"), - ) - assert self.expected_count(145961) - - # put the tsv data in a namedgraph - def test_import_tsv_from_file_ns(self): - db = self.db - self.admin.import_file( - db.name, - stardog.content.MappingFile("data/test_import_delimited.sms"), - stardog.content.ImportFile("data/test_import_delimited.tsv"), - None, - self.ng, - ) - 
assert self.expected_count(145961, ng=f"<{self.ng}>") - - def test_import_json_from_file(self): - db = self.db - self.admin.import_file( - db.name, - stardog.content.MappingFile("data/test_import_json.sms"), - stardog.content.ImportFile("data/test_import.json"), - ) - assert self.expected_count(223) - - def test_import_json_from_contents(self): - db = self.db - with open("data/test_import.json") as json: - with open("data/test_import_json.sms") as sms: - self.admin.import_file( - db.name, - stardog.content.MappingRaw(sms.read()), - stardog.content.ImportRaw(json.read(), name="data.json"), - ) - assert self.expected_count(223) - - def test_import_json_from_file_ns(self): - db = self.db - self.admin.import_file( - db.name, - stardog.content.MappingFile("data/test_import_json.sms"), - stardog.content.ImportFile("data/test_import.json"), - None, - "http://example.org/graph", - ) - assert self.expected_count(223, ng=f"<{self.ng}>") - - def test_materialize_graph_from_file(self): - db = self.db - - if self.run_vg_test: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingFile("data/music_mappings.ttl", "STARDOG"), - None, - self.music_options, - ) - assert self.expected_count(37) - else: - pytest.skip(self.msg_vg_test) - - def test_materialize_graph_from_file(self): - db = self.db - - if self.run_vg_test: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingFile("data/music_mappings.ttl", "STARDOG"), - None, - self.music_options, - ) - assert self.expected_count(37) - else: - pytest.skip(self.msg_vg_test) - - def test_materialize_graph_from_file_with_ds(self): - db = self.db - ds = self.ds - - if self.run_vg_test: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingFile("data/music_mappings.ttl", "STARDOG"), - ds.name, - ) - assert self.expected_count(37) - else: - pytest.skip(self.msg_vg_test) - - def test_materialize_graph_from_content(self): - db = self.db - - if self.run_vg_test: - with open("data/music_mappings.ttl") as f: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingRaw(f.read(), "STARDOG"), - None, - self.music_options, - ) - assert self.expected_count(37) - else: - pytest.skip(self.msg_vg_test) - - def test_materialize_graph_from_file_in_ng(self): - db = self.db - - if self.run_vg_test: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingFile("data/music_mappings.ttl", "STARDOG"), - None, - self.music_options, - self.ng, - ) - assert self.expected_count(37, ng=f"<{self.ng}>") - else: - pytest.skip(self.msg_vg_test) - - def test_materialize_graph_from_file_with_ds(self): - db = self.db - ds = self.ds - - if self.run_vg_test: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingFile("data/music_mappings.ttl", "STARDOG"), - ds.name, - None, - self.ng, - ) - assert self.expected_count(37, ng=f"<{self.ng}>") - else: - pytest.skip(self.msg_vg_test) - - def test_materialize_graph_from_content_with_ng(self): - db = self.db - - if self.run_vg_test: - with open("data/music_mappings.ttl") as f: - self.admin.materialize_virtual_graph( - db.name, - stardog.content.MappingRaw(f.read(), "STARDOG"), - None, - self.music_options, - self.ng, - ) - assert self.expected_count(37, ng=f"<{self.ng}>") - else: - pytest.skip(self.msg_vg_test) - - def test_import_db_deprecated(self): - db = self.db - - if self.run_vg_test: - self.admin.import_virtual_graph( - db.name, - stardog.content.File("data/music_mappings.ttl"), - self.ng, - False, - self.music_options, - ) - 
assert self.expected_count(37, ng=f"<{self.ng}>") - else: - pytest.skip(self.msg_vg_test) diff --git a/test/test_server_admin.py b/test/test_server_admin.py new file mode 100644 index 0000000..63ead47 --- /dev/null +++ b/test/test_server_admin.py @@ -0,0 +1,80 @@ +import pytest +from stardog import exceptions + + +def test_get_server_metrics(admin): + assert "dbms.storage.levels" in admin.get_server_metrics() + + +def test_get_prometheus_metrics(admin): + assert "TYPE databases_planCache_size gauge" in admin.get_prometheus_metrics() + + +def test_get_metadata_properties(admin): + assert "database.archetypes" in admin.get_all_metadata_properties() + + +def test_alive(admin): + assert admin.alive() + + +def test_healthcheck(admin): + assert admin.healthcheck() + + +def test_queries(admin): + assert len(admin.queries()) == 0 + + with pytest.raises(exceptions.StardogException, match="Query not found: 1"): + admin.query(1) + + with pytest.raises(exceptions.StardogException, match="Query not found: 1"): + admin.kill_query(1) + + +## This might or might not be better to move it to a separate file. +## Since we move to machine executor, we don't really need to ssh, since we can modify the files on the host +@pytest.mark.skip( + reason="We need to sort out how we are going to deal with ssh, since it's no longer required" +) +def test_backup_all(admin): + admin.backup_all() + + default_backup = subprocess.run( + [ + "sshpass", + "-p", + SSH_PASS, + "ssh", + "-o", + "StrictHostKeyChecking=no", + "ssh://" + SSH_USER + "@" + STARDOG_HOSTNAME_NODE_1 + ":2222", + "--", + "ls", + "-la", + "/var/opt/stardog/", + ], + stdout=subprocess.PIPE, + universal_newlines=True, + ) + assert ".backup" in default_backup.stdout + + admin.backup_all(location="/tmp") + tmp_backup = subprocess.run( + [ + "sshpass", + "-p", + SSH_PASS, + "ssh", + "-o", + "StrictHostKeyChecking=no", + "ssh://" + SSH_USER + "@" + STARDOG_HOSTNAME_NODE_1 + ":2222", + "--", + "ls", + "-l", + "/tmp", + ], + stdout=subprocess.PIPE, + universal_newlines=True, + ) + assert "meta" in tmp_backup.stdout diff --git a/test/test_single_node.py b/test/test_single_node.py index 8442f3a..afca0e2 100644 --- a/test/test_single_node.py +++ b/test/test_single_node.py @@ -1,16 +1,7 @@ import pytest -import stardog.admin import os import datetime -import stardog.content as content -import stardog.exceptions as exceptions -import stardog.connection as connection - - -@pytest.fixture() -def admin(conn_string): - with stardog.admin.Admin(**conn_string) as admin: - yield admin +from stardog import admin, connection, content, exceptions def test_database_repair(admin, bulkload_content): diff --git a/test/test_standby.py b/test/test_standby.py new file mode 100644 index 0000000..d3c5780 --- /dev/null +++ b/test/test_standby.py @@ -0,0 +1,28 @@ +from .utils import wait_standby_node_to_join, get_node_ip + + +# We should pass a standby admin object instead of a connection string. +def test_cluster_standby(admin, cluster_standby_node_conn_string): + + with stardog.admin.Admin(**cluster_standby_node_conn_string) as admin_standby: + assert admin_standby.standby_node_pause(pause=True) + assert admin_standby.standby_node_pause_status()["STATE"] == "PAUSED" + assert admin_standby.standby_node_pause(pause=False) + assert admin_standby.standby_node_pause_status()["STATE"] == "WAITING" + + # Join a standby node is still allowed even if it's not part of the registry. 
+ admin_standby.cluster_join() + wait_standby_node_to_join(admin_standby) + + # Make sure the standby node is part of the cluster + standby_node_info = ( + get_node_ip(STARDOG_HOSTNAME_STANDBY).decode("utf-8").strip() + ":5820" + ) + assert standby_node_info in admin_standby.cluster_info()["nodes"] + + standby_nodes = admin_standby.cluster_list_standby_nodes() + node_id = standby_nodes["standbynodes"][0] + # removes a standby node from the registry, i.e from syncing with the rest of the cluster. + admin_standby.cluster_revoke_standby_access(standby_nodes["standbynodes"][0]) + standby_nodes_revoked = admin_standby.cluster_list_standby_nodes() + assert node_id not in standby_nodes_revoked["standbynodes"] diff --git a/test/test_unit.py b/test/test_unit.py index 7b297f8..fd87b03 100644 --- a/test/test_unit.py +++ b/test/test_unit.py @@ -2,9 +2,7 @@ import requests import requests_mock -import stardog.exceptions -from stardog import content -from stardog.content_types import * +from stardog import admin, content, exceptions, content_types class TestMaterializeGraph: @@ -19,11 +17,11 @@ def test_materialize_graph_from_file_with_ds(self): ) m.get("http://localhost:5820/admin/alive", status_code=200) - admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") + sd_admin = admin.Admin("http://localhost:5820", "admin", "admin") - admin.materialize_virtual_graph( + sd_admin.materialize_virtual_graph( "db_test", - stardog.content.MappingFile("test.sms2"), + content.MappingFile("test.sms2"), "ds_test", None, None, @@ -42,17 +40,17 @@ def test_materialize_graph_from_file_with_bad_ds(self): ) m.get("http://localhost:5820/admin/alive", status_code=200) - admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") + sd_admin = admin.Admin("http://localhost:5820", "admin", "admin") try: - admin.materialize_virtual_graph( + sd_admin.materialize_virtual_graph( "db_test", - stardog.content.MappingFile("test.sms2"), + content.MappingFile("test.sms2"), "ds_test", None, None, ) - except stardog.exceptions.StardogException as e: + except exceptions.StardogException as e: if e.http_code == 404: assert True return @@ -79,11 +77,11 @@ def text_callback(request, context): ) m.get("http://localhost:5820/admin/alive", status_code=200) - admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") + sd_admin = admin.Admin("http://localhost:5820", "admin", "admin") - admin.materialize_virtual_graph( + sd_admin.materialize_virtual_graph( "db_test", - stardog.content.MappingFile("test.sms2"), + content.MappingFile("test.sms2"), "ds_test", None, None, @@ -92,12 +90,12 @@ def text_callback(request, context): def test_materialize_graph_missing_ds_or_options(self): with requests_mock.Mocker() as m: m.get("http://localhost:5820/admin/alive", status_code=200) - admin = stardog.admin.Admin("http://localhost:5820", "admin", "admin") + sd_admin = admin.Admin("http://localhost:5820", "admin", "admin") try: - admin.materialize_virtual_graph( + sd_admin.materialize_virtual_graph( "db_test", - stardog.content.MappingFile("test.sms2"), + content.MappingFile("test.sms2"), ) except AssertionError as e: assert True @@ -108,64 +106,115 @@ def test_materialize_graph_missing_ds_or_options(self): class TestContentType: def test_guess_rdf_format(self): - assert guess_rdf_format("test.ttl") == (None, TURTLE) - assert guess_rdf_format("test.rdf") == (None, RDF_XML) - assert guess_rdf_format("test.rdfs") == (None, RDF_XML) - assert guess_rdf_format("test.owl") == (None, RDF_XML) - assert 
guess_rdf_format("test.xml") == (None, RDF_XML) - assert guess_rdf_format("test.nt") == (None, NTRIPLES) - assert guess_rdf_format("test.n3") == (None, N3) - assert guess_rdf_format("test.nq") == (None, NQUADS) - assert guess_rdf_format("test.nquads") == (None, NQUADS) - assert guess_rdf_format("test.trig") == (None, TRIG) - assert guess_rdf_format("test.trix") == (None, TRIX) - assert guess_rdf_format("test.json") == (None, LD_JSON) - assert guess_rdf_format("test.jsonld") == (None, LD_JSON) - - assert guess_rdf_format("test.ttl.gz") == ("gzip", TURTLE) - assert guess_rdf_format("test.ttl.zip") == ("zip", TURTLE) - assert guess_rdf_format("test.ttl.bz2") == ("bzip2", TURTLE) + assert content_types.guess_rdf_format("test.ttl") == ( + None, + content_types.TURTLE, + ) + assert content_types.guess_rdf_format("test.rdf") == ( + None, + content_types.RDF_XML, + ) + assert content_types.guess_rdf_format("test.rdfs") == ( + None, + content_types.RDF_XML, + ) + assert content_types.guess_rdf_format("test.owl") == ( + None, + content_types.RDF_XML, + ) + assert content_types.guess_rdf_format("test.xml") == ( + None, + content_types.RDF_XML, + ) + assert content_types.guess_rdf_format("test.nt") == ( + None, + content_types.NTRIPLES, + ) + assert content_types.guess_rdf_format("test.n3") == (None, content_types.N3) + assert content_types.guess_rdf_format("test.nq") == (None, content_types.NQUADS) + assert content_types.guess_rdf_format("test.nquads") == ( + None, + content_types.NQUADS, + ) + assert content_types.guess_rdf_format("test.trig") == (None, content_types.TRIG) + assert content_types.guess_rdf_format("test.trix") == (None, content_types.TRIX) + assert content_types.guess_rdf_format("test.json") == ( + None, + content_types.LD_JSON, + ) + assert content_types.guess_rdf_format("test.jsonld") == ( + None, + content_types.LD_JSON, + ) + + assert content_types.guess_rdf_format("test.ttl.gz") == ( + "gzip", + content_types.TURTLE, + ) + assert content_types.guess_rdf_format("test.ttl.zip") == ( + "zip", + content_types.TURTLE, + ) + assert content_types.guess_rdf_format("test.ttl.bz2") == ( + "bzip2", + content_types.TURTLE, + ) def test_guess_mapping_format_from_filename(self): - assert guess_mapping_format("test.rq") == "SMS2" - assert guess_mapping_format("test.sms2") == "SMS2" - assert guess_mapping_format("test.sms") == "SMS2" - assert guess_mapping_format("test.r2rml") == "R2RML" - assert guess_mapping_format("test.what") is None + assert content_types.guess_mapping_format("test.rq") == "SMS2" + assert content_types.guess_mapping_format("test.sms2") == "SMS2" + assert content_types.guess_mapping_format("test.sms") == "SMS2" + assert content_types.guess_mapping_format("test.r2rml") == "R2RML" + assert content_types.guess_mapping_format("test.what") is None def test_guess_mapping_format_from_content(self): - assert guess_mapping_format_from_content("MAPPING\nFROM ") == "SMS2" - assert guess_mapping_format_from_content("#A comment\nMAPPING FROM ") == "SMS2" + assert ( + content_types.guess_mapping_format_from_content("MAPPING\nFROM ") == "SMS2" + ) + assert ( + content_types.guess_mapping_format_from_content("#A comment\nMAPPING FROM ") + == "SMS2" + ) def test_guess_import_format(self): - assert guess_import_format("test.csv") == (None, "text/csv", "DELIMITED", ",") - assert guess_import_format("test.tsv") == ( + assert content_types.guess_import_format("test.csv") == ( + None, + "text/csv", + "DELIMITED", + ",", + ) + assert content_types.guess_import_format("test.tsv") == ( None, 
"text/tab-separated-values", "DELIMITED", "\t", ) - assert guess_import_format("test.json") == ( + assert content_types.guess_import_format("test.json") == ( None, "application/json", "JSON", None, ) - assert guess_import_format("test.what") == (None, None, None, None) + assert content_types.guess_import_format("test.what") == ( + None, + None, + None, + None, + ) - assert guess_import_format("test.csv.gz") == ( + assert content_types.guess_import_format("test.csv.gz") == ( "gzip", "text/csv", "DELIMITED", ",", ) - assert guess_import_format("test.csv.zip") == ( + assert content_types.guess_import_format("test.csv.zip") == ( "zip", "text/csv", "DELIMITED", ",", ) - assert guess_import_format("test.csv.bz2") == ( + assert content_types.guess_import_format("test.csv.bz2") == ( "bzip2", "text/csv", "DELIMITED", @@ -176,57 +225,57 @@ def test_guess_import_format(self): class TestContent: def test_file(self): m = content.File("test.ttl") - assert m.content_type == TURTLE + assert m.content_type == content_types.TURTLE assert m.content_encoding is None assert m.fname == "test.ttl" m = content.File("test.rdf") - assert m.content_type == RDF_XML + assert m.content_type == content_types.RDF_XML assert m.content_encoding is None m = content.File("test.rdfs") - assert m.content_type == RDF_XML + assert m.content_type == content_types.RDF_XML assert m.content_encoding is None m = content.File("test.owl") - assert m.content_type == RDF_XML + assert m.content_type == content_types.RDF_XML assert m.content_encoding is None m = content.File("test.xml") - assert m.content_type == RDF_XML + assert m.content_type == content_types.RDF_XML assert m.content_encoding is None m = content.File("test.nt") - assert m.content_type == NTRIPLES + assert m.content_type == content_types.NTRIPLES assert m.content_encoding is None m = content.File("test.n3") - assert m.content_type == N3 + assert m.content_type == content_types.N3 assert m.content_encoding is None m = content.File("test.nq") - assert m.content_type == NQUADS + assert m.content_type == content_types.NQUADS assert m.content_encoding is None m = content.File("test.nquads") - assert m.content_type == NQUADS + assert m.content_type == content_types.NQUADS assert m.content_encoding is None m = content.File("test.trig") - assert m.content_type == TRIG + assert m.content_type == content_types.TRIG assert m.content_encoding is None m = content.File("test.trix") - assert m.content_type == TRIX + assert m.content_type == content_types.TRIX assert m.content_encoding is None m = content.File("test.json") - assert m.content_type == LD_JSON + assert m.content_type == content_types.LD_JSON assert m.content_encoding is None m = content.File("test.jsonld") - assert m.content_type == LD_JSON + assert m.content_type == content_types.LD_JSON assert m.content_encoding is None m = content.File("test.turtle", content_type="text/turtle", name="overrideName") @@ -301,11 +350,11 @@ def test_mapping_file(self): assert m.name == "overrideName" def test_mapping_raw(self): - with open("data/test_import_delimited.sms") as f: + with open("test/data/test_import_delimited.sms") as f: m = content.MappingRaw(f.read()) assert m.syntax == "SMS2" - with open("data/r2rml.ttl") as f: + with open("test/data/r2rml.ttl") as f: m = content.MappingRaw(f.read()) assert m.syntax is None @@ -414,11 +463,11 @@ def test_import_raw(self): class TestStardogException: def test_exception_orig(self): # While not appropriate raise StardogException from scratch, let's check if it still works the old way - 
exception = stardog.exceptions.StardogException("Mymessage")
+        exception = exceptions.StardogException("Mymessage")
         assert str(exception) == "Mymessage"
 
     def test_exception(self):
-        exception = stardog.exceptions.StardogException("Mymessage", 400, "SD90A")
+        exception = exceptions.StardogException("Mymessage", 400, "SD90A")
         assert str(exception) == "Mymessage"
         assert exception.http_code == 400
         assert exception.stardog_code == "SD90A"
diff --git a/test/test_utils.py b/test/test_utils.py
index e42deff..5da4390 100644
--- a/test/test_utils.py
+++ b/test/test_utils.py
@@ -1,5 +1,4 @@
-import stardog.content as content
-import stardog.content_types as content_types
+from stardog import content, content_types
 
 
 def test_content():
diff --git a/test/utils.py b/test/utils.py
new file mode 100644
index 0000000..9b15381
--- /dev/null
+++ b/test/utils.py
@@ -0,0 +1,89 @@
+import os
+from time import sleep
+import subprocess
+
+
+SSH_USER = os.environ["SSH_USER"]
+SSH_PASS = os.environ["SSH_PASS"]
+STARDOG_HOSTNAME_NODE_1 = os.environ["STARDOG_HOSTNAME_NODE_1"]
+STARDOG_HOSTNAME_STANDBY = os.environ["STARDOG_HOSTNAME_STANDBY"]
+
+
+def get_node_ip(node_hostname):
+    node_ip = subprocess.run(
+        [
+            "sshpass",
+            "-p",
+            SSH_PASS,
+            "ssh",
+            "-o",
+            "StrictHostKeyChecking=no",
+            f"ssh://{SSH_USER}@{node_hostname}:2222",
+            "--",
+            "hostname",
+            "-I",
+        ],
+        stdout=subprocess.PIPE,
+    )
+    return node_ip.stdout
+
+
+def get_current_node_count(admin):
+    return len(admin.cluster_info()["nodes"])
+
+
+def wait_standby_node_to_join(admin):
+    standby_node_ip = (
+        get_node_ip(STARDOG_HOSTNAME_STANDBY).decode("utf-8").strip() + ":5820"
+    )
+    retries = 0
+    while True:
+        try:
+            if standby_node_ip in admin.cluster_info()["nodes"]:
+                print(f"current nodes: {admin.cluster_info()['nodes']}")
+                break
+            else:
+                print(
+                    "http call did not fail, but node is still not listed in cluster info"
+                )
+        except Exception as e:
+            print(
+                f"An exception occurred while connecting to the standby node: {e}, will keep retrying"
+            )
+        print(f"retries for now: {retries}")
+        retries += 1
+        sleep(20)
+        if retries >= 50:
+            raise Exception("Took too long for standby node to join the cluster")
+
+
+def wait_for_cleaning_cache_target(admin, cache_target_name):
+    retries = 0
+    while True:
+        cache_targets = admin.cache_targets()
+        cache_target_names = [cache_target.name for cache_target in cache_targets]
+        if cache_target_name in cache_target_names:
+            retries += 1
+            sleep(1)
+            if retries >= 20:
+                raise Exception(
+                    "Took too long to remove cache target: " + cache_target_name
+                )
+        else:
+            return
+
+
+def wait_for_creating_cache_target(admin, cache_target_name):
+    retries = 0
+    while True:
+        cache_targets = admin.cache_targets()
+        cache_target_names = [cache_target.name for cache_target in cache_targets]
+        if cache_target_name not in cache_target_names:
+            retries += 1
+            sleep(1)
+            if retries >= 20:
+                raise Exception(
+                    "Took too long to register cache target: " + cache_target_name
+                )
+        else:
+            return
diff --git a/utils/wait.sh b/test/utils/wait.sh
similarity index 89%
rename from utils/wait.sh
rename to test/utils/wait.sh
index 494399c..41f02b3 100755
--- a/utils/wait.sh
+++ b/test/utils/wait.sh
@@ -1,12 +1,12 @@
 #!/bin/bash
 
-function wait_for_start {
+function wait_for_start_cluster {
 (
   HOST=${1}
   PORT=${2}
   # Wait for stardog to be running
   COUNT=0
-  set +e
+  set +ex
   not_ready=true
   while $not_ready
   do
@@ -18,7 +18,8 @@ function wait_for_start {
     sleep 5
 
     # wait for main cluster to be ready
-    number_of_nodes=$(curl -s
http://${HOST}:${PORT}/admin/cluster/ -u ${STARDOG_USER}:${STARDOG_PASS} | jq .'nodes | length') + curl -v http://${HOST}:${PORT}/admin/cluster/ -u admin:admin + number_of_nodes=$(curl -s http://${HOST}:${PORT}/admin/cluster/ -u admin:admin | jq .'nodes | length') echo "number of nodes ready: " $number_of_nodes if [[ $number_of_nodes -eq 2 && $RC -eq 0 ]]; then break; fi @@ -73,7 +74,7 @@ function wait_for_start_single_node { COUNT=$(expr 1 + ${COUNT} ) sleep 5 - curl -s http://${HOST}:${PORT}/admin/healthcheck -u ${STARDOG_USER}:${STARDOG_PASS} + curl -s http://${HOST}:${PORT}/admin/healthcheck -u admin:admin if [ $? -eq 0 ]; then echo "Stardog server single node up and running" break diff --git a/utils/run_test_single_node.sh b/utils/run_test_single_node.sh deleted file mode 100755 index 972dd5c..0000000 --- a/utils/run_test_single_node.sh +++ /dev/null @@ -1,10 +0,0 @@ -#!/bin/bash -x - -source ./utils/wait.sh - -echo "OK" -wait_for_start_single_node ${STARDOG_ENDPOINT} 5820 - -echo ${STARDOG_ENDPOINT} -echo "READY" -pytest test/test_single_node.py --endpoint http://${STARDOG_ENDPOINT}:5820 -s diff --git a/utils/run_tests.sh b/utils/run_tests.sh deleted file mode 100755 index 40adc35..0000000 --- a/utils/run_tests.sh +++ /dev/null @@ -1,9 +0,0 @@ -#!/bin/bash -x - -source ./utils/wait.sh - -echo "Commenting and sleeping instead, to check whether there is a bug with cluster status" -wait_for_start ${STARDOG_LB} 5820 -wait_for_standby_node ${STARDOG_HOSTNAME_STANDBY} 5820 -pytest --endpoint http://${STARDOG_LB}:5820 -s -k 'not test_single_node' - From 7462815be602822eafee8d9613bb8ebe87eb1068 Mon Sep 17 00:00:00 2001 From: Simon Cardenas Date: Tue, 20 Dec 2022 16:53:25 -0300 Subject: [PATCH 5/8] Removes distutils warning message --- requirements.txt | 2 +- stardog/connection.py | 8 ++++---- stardog/utils.py | 9 +++++++++ 3 files changed, 14 insertions(+), 5 deletions(-) create mode 100644 stardog/utils.py diff --git a/requirements.txt b/requirements.txt index 2ba99d4..097dc99 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,4 +1,4 @@ -requests==2.22.0 +requests==2.28.1 requests-toolbelt==0.9.1 contextlib2==0.5.5 recommonmark==0.5.0 diff --git a/stardog/connection.py b/stardog/connection.py index 6dbd42a..499019a 100644 --- a/stardog/connection.py +++ b/stardog/connection.py @@ -2,11 +2,11 @@ """ import contextlib -import distutils.util from . import content_types as content_types from . import exceptions as exceptions from .http import client +from .utils import strtobool import urllib @@ -437,7 +437,7 @@ def ask(self, query, **kwargs): >>> conn.ask('ask {:subj :pred :obj}', reasoning=True) """ r = self.__query(query, "query", content_types.BOOLEAN, **kwargs) - return bool(distutils.util.strtobool(r.decode())) + return strtobool(r.decode()) def update(self, query, **kwargs): """Executes a SPARQL update query. @@ -473,7 +473,7 @@ def is_consistent(self, graph_uri=None): params={"graph-uri": graph_uri}, ) - return bool(distutils.util.strtobool(r.text)) + return strtobool(r.text) def explain_inference(self, content): """Explains the given inference results. 
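[Aside, not part of the patch: distutils is deprecated by PEP 632 and slated for removal in Python 3.12, which is what motivates replacing distutils.util.strtobool with the local helper in the hunks above. A minimal sketch of the behavior at those call sites; the database name and endpoint below are illustrative defaults, not taken from the patch:

    from stardog.connection import Connection
    from stardog.utils import strtobool

    # Stardog answers ASK queries and consistency checks with a plain
    # "true"/"false" body; strtobool turns that text into a Python bool.
    assert strtobool("true") is True
    assert strtobool("no") is False

    with Connection("mydb", endpoint="http://localhost:5820",
                    username="admin", password="admin") as conn:
        print(conn.ask("ask { ?s ?p ?o }"))  # prints a bool, not a string
]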
@@ -731,7 +731,7 @@ def is_valid(self, content, graph_uri=None):
             params={"graph-uri": graph_uri},
         )
 
-        return bool(distutils.util.strtobool(r.text))
+        return strtobool(r.text)
 
     def explain_violations(self, content, graph_uri=None):
         """
diff --git a/stardog/utils.py b/stardog/utils.py
new file mode 100644
index 0000000..e4e9fbe
--- /dev/null
+++ b/stardog/utils.py
@@ -0,0 +1,9 @@
+def strtobool(s):
+    # s is always a string here; accept "1"/"0" like distutils did.
+    truthy_values = ["y", "yes", "t", "true", "True", "on", "1"]
+    falsy_values = ["n", "no", "f", "false", "False", "off", "0"]
+    if s in truthy_values:
+        return True
+    elif s in falsy_values:
+        return False
+    else:
+        raise ValueError

From 4195e5281a7ec1206de93dec3a58ea9dd5185759 Mon Sep 17 00:00:00 2001
From: Simon Cardenas
Date: Thu, 22 Dec 2022 00:40:55 -0300
Subject: [PATCH 6/8] Adds style and lint checks

---
 .circleci/config.yml     | 23 ++++++++++++++++++++---
 setup.cfg                | 34 +++++++++++++++++++++++++++++++++-
 setup.py                 | 29 ++---------------------------
 stardog/__init__.py      | 12 ++++++------
 stardog/admin.py         | 22 ++++++++++------------
 stardog/connection.py    |  8 ++++----
 stardog/content.py       |  2 +-
 stardog/content_types.py |  1 +
 stardog/exceptions.py    |  2 --
 stardog/http/client.py   |  2 +-
 stardog/utils.py         |  5 ++---
 test-requirements.txt    |  5 +++--
 12 files changed, 83 insertions(+), 62 deletions(-)

diff --git a/.circleci/config.yml b/.circleci/config.yml
index b7b1fc9..5cfdf94 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -62,17 +62,34 @@ pull_license: &pull_license
       echo ${pystardog_license} | base64 --decode > ~/project/dockerfiles/stardog-license-key.bin
 
 jobs:
-  format:
+  static_analysis:
     docker:
       - image: python:3.9
     steps:
       - checkout
       - run:
-          name: Formatter
+          name: Install dependencies
          command: |
            pip install -r test-requirements.txt
+
+      - run:
+          name: Black
+          command: |
             black --check .
 
+      - run:
+          # We fail hard if the flake8 checks are not met.
+          name: Flake8
+          command: |
+            flake8 stardog
+
+      - run:
+          # We allow pylint to fail since it's too strict.
+          # We should fix these slowly, and come up with a proper configuration to enable limited checks.
+          name: pylint
+          command: |
+            pylint stardog --exit-zero
+
   basic_test_suite_single_node:
     machine:
       image: ubuntu-2204:2022.10.2
@@ -149,7 +166,7 @@ jobs:
 workflows:
   build_and_test:
     jobs:
-      - format
+      - static_analysis
       - basic_test_suite_single_node
       - basic_test_cluster_mode
       # Have to reenable cache and standby tests
diff --git a/setup.cfg b/setup.cfg
index b7e4789..d693d13 100644
--- a/setup.cfg
+++ b/setup.cfg
@@ -1,2 +1,34 @@
+[metadata]
+name = pystardog
+version = 0.12.0
+author = Stardog Union
+author_email = support@stardog.com
+description = Use Stardog with Python!
+long_description = file: README.md
+long_description_content_type = text/markdown
+url = https://github.com/stardog-union/pystardog
+classifiers =
+    Programming Language :: Python :: 3
+    License :: OSI Approved :: Apache Software License
+    Operating System :: OS Independent
+
+[options]
+packages = find:
+install_requires =
+    requests>=2.22.0
+    requests-toolbelt>=0.9.1
+    contextlib2>=0.5.5
+setup_requires = pytest-runner
+tests_require = pytest
+
 [aliases]
-test=pytest
+test = pytest
+
+[flake8]
+ignore = E203,E501
+
+[pylint.'MESSAGES CONTROL']
+disable = all
+# We will only fail on Fatal, Errors and Warnings.
+# https://pylint.pycqa.org/en/latest/user_guide/usage/run.html#exit-codes +enable = F,E,W \ No newline at end of file diff --git a/setup.py b/setup.py index 19aea94..6068493 100644 --- a/setup.py +++ b/setup.py @@ -1,28 +1,3 @@ -import setuptools +from setuptools import setup -with open("README.md", "r") as fh: - long_description = fh.read() - -setuptools.setup( - name="pystardog", - version="0.12.0", - author="Stardog Union", - author_email="support@stardog.com", - description="Use Stardog with Python!", - long_description=long_description, - long_description_content_type="text/markdown", - url="https://github.com/stardog-union/pystardog", - packages=setuptools.find_packages(), - classifiers=[ - "Programming Language :: Python :: 3", - "License :: OSI Approved :: Apache Software License", - "Operating System :: OS Independent", - ], - install_requires=[ - "requests>=2.22.0", - "requests-toolbelt>=0.9.1", - "contextlib2>=0.5.5", - ], - setup_requires=["pytest-runner"], - tests_require=["pytest"], -) +setup() diff --git a/stardog/__init__.py b/stardog/__init__.py index 75b5c06..c217cbf 100644 --- a/stardog/__init__.py +++ b/stardog/__init__.py @@ -1,7 +1,7 @@ -from stardog.admin import Admin -from stardog.connection import Connection -import stardog.content as content -import stardog.content_types as content_types -import stardog.exceptions as exceptions +from . import admin +from . import connection +from . import content +from . import content_types +from . import exceptions -__all__ = [Admin, Connection, content, content_types, exceptions] +__all__ = ["admin", "connection", "content", "content_types", "exceptions"] diff --git a/stardog/admin.py b/stardog/admin.py index 8023515..861cc3d 100644 --- a/stardog/admin.py +++ b/stardog/admin.py @@ -7,13 +7,11 @@ import urllib from time import sleep -from stardog.exceptions import StardogException - from . import content_types as content_types from .http import client -class Admin(object): +class Admin: """Admin Connection. This is the entry point for admin-related operations on a Stardog server. @@ -1045,7 +1043,7 @@ def __exit__(self, *args): self.client.close() -class Database(object): +class Database: """Database Admin See Also: @@ -1251,7 +1249,7 @@ def __eq__(self, other): return self.name == other.name -class StoredQuery(object): +class StoredQuery: """Stored Query See Also: @@ -1347,7 +1345,7 @@ def __eq__(self, other): return self.name == other.name -class User(object): +class User: """User See Also: @@ -1532,7 +1530,7 @@ def __eq__(self, other): return self.name == other.name -class Role(object): +class Role: """Role See Also: @@ -1643,7 +1641,7 @@ def __eq__(self, other): return self.name == other.name -class VirtualGraph(object): +class VirtualGraph: """Virtual Graph See Also: @@ -1799,7 +1797,7 @@ def __eq__(self, other): return self.name == other.name -class DataSource(object): +class DataSource: """Initializes a DataSource See Also: @@ -1903,7 +1901,7 @@ def __eq__(self, other): # We could get rid of this class, and the delete method here as admin.delete_stored_functions() can take a single stored function # and mimic this behaviour. This is intentionally put here in case more methods are added to StoredFunctions # in the future. -# class StoredFunction(object): +# class StoredFunction(): # def init(self): # """ # Initializes an StoredFunction @@ -1917,7 +1915,7 @@ def __eq__(self, other): # """ -class Cache(object): +class Cache: """Cached data A cached dataset from a query or named/virtual graph. 
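[Aside, not part of the patch: the explicit `object` bases being removed in the hunks above and below are redundant in Python 3, where every class is already a new-style class. A two-line sketch showing the equivalence:

    # These two declarations have identical semantics in Python 3;
    # both MROs terminate in object.
    class WithExplicitBase(object):
        pass

    class WithoutExplicitBase:
        pass

    assert WithExplicitBase.__mro__[-1] is object
    assert WithoutExplicitBase.__mro__[-1] is object
]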
@@ -1959,7 +1957,7 @@ def __eq__(self, other): return self.name == other.name -class CacheTarget(object): +class CacheTarget: """Cache Target Server""" def __init__(self, name, client): diff --git a/stardog/connection.py b/stardog/connection.py index 499019a..b76024f 100644 --- a/stardog/connection.py +++ b/stardog/connection.py @@ -10,7 +10,7 @@ import urllib -class Connection(object): +class Connection: """Database Connection. This is the entry point for all user-related operations on a @@ -546,7 +546,7 @@ def __exit__(self, *args): self.close() -class Docs(object): +class Docs: """BITES: Document Storage. See Also: @@ -638,7 +638,7 @@ def delete(self, name): self.client.delete("/docs/{}".format(name)) -class ICV(object): +class ICV: """Integrity Constraint Validation. See Also: @@ -843,7 +843,7 @@ def report(self, **kwargs): return r.text -class GraphQL(object): +class GraphQL: """GraphQL See Also: diff --git a/stardog/content.py b/stardog/content.py index 45ef59d..8404a58 100644 --- a/stardog/content.py +++ b/stardog/content.py @@ -8,7 +8,7 @@ from . import content_types as content_types -class Content(object): +class Content: """Content base class.""" pass diff --git a/stardog/content_types.py b/stardog/content_types.py index 484f3f5..459407d 100644 --- a/stardog/content_types.py +++ b/stardog/content_types.py @@ -153,3 +153,4 @@ def _get_extension(fname): pos = fname.rfind(".") if pos >= 0: return fname[pos + 1 :].lower() + raise Exception("File has no extension") diff --git a/stardog/exceptions.py b/stardog/exceptions.py index ae13489..c9664c0 100644 --- a/stardog/exceptions.py +++ b/stardog/exceptions.py @@ -10,5 +10,3 @@ def __init__(self, message, http_code=None, stardog_code=None): class TransactionException(StardogException): """Transaction Exceptions""" - - pass diff --git a/stardog/http/client.py b/stardog/http/client.py index 15a6e96..0162616 100644 --- a/stardog/http/client.py +++ b/stardog/http/client.py @@ -5,7 +5,7 @@ from .. 
import exceptions as exceptions
 
 
-class Client(object):
+class Client:
     DEFAULT_ENDPOINT = "http://localhost:5820"
     DEFAULT_USERNAME = "admin"
     DEFAULT_PASSWORD = "admin"
diff --git a/stardog/utils.py b/stardog/utils.py
index e4e9fbe..2e7c51e 100644
--- a/stardog/utils.py
+++ b/stardog/utils.py
@@ -3,7 +3,6 @@ def strtobool(s):
     falsy_values = ["n", "no", "f", "false", "False", "off", "0"]
     if s in truthy_values:
         return True
-    elif s in falsy_values:
+    if s in falsy_values:
         return False
-    else:
-        raise ValueError
+    raise ValueError
diff --git a/test-requirements.txt b/test-requirements.txt
index 675b532..371df08 100644
--- a/test-requirements.txt
+++ b/test-requirements.txt
@@ -1,4 +1,5 @@
-pytest==6.2.5
+pytest==7.2.0
 black==22.3.0
 requests-mock==1.10.0
-
+pylint==2.15.9
+flake8==6.0.0

From c557088008e7ae25662fb94af08a889668b3df28 Mon Sep 17 00:00:00 2001
From: Simon Cardenas
Date: Tue, 10 Jan 2023 12:42:00 -0300
Subject: [PATCH 7/8] Tests supported python versions

---
 .circleci/config.yml           | 44 +++++++++++++++++++++++++++++++---
 docker-compose.cluster.yml     |  6 +----
 docker-compose.single-node.yml |  6 +----
 test-requirements.txt          |  1 +
 tox.ini                        | 21 ++++++++++++++++
 5 files changed, 65 insertions(+), 13 deletions(-)
 create mode 100644 tox.ini

diff --git a/.circleci/config.yml b/.circleci/config.yml
index 5cfdf94..e93a809 100644
--- a/.circleci/config.yml
+++ b/.circleci/config.yml
@@ -1,6 +1,17 @@
 ---
 version: 2.1
 
+installs_supported_python_versions: &installs_supported_python_versions
+  run:
+    name: Install supported python versions
+    no_output_timeout: 15m
+    command: |
+      set -x
+      sudo apt-get update -y
+      yes | sudo add-apt-repository ppa:deadsnakes/ppa || true
+      sudo apt-get install python3.{8,9,10,11} -yq
+      sudo apt-get install python3.{8,9}-distutils -yq
+
 run_basic_admin_suite: &run_basic_admin_suite
   run:
     name: Runs the basic admin suite
@@ -41,6 +52,16 @@ run_utils_tests: &run_utils_tests
       set -x
       pytest test/test_utils.py -s
 
+# See here for supported versions: https://devguide.python.org/versions/
+# We should support everything that's not EOL.
+run_test_supported_python_versions: &run_test_supported_python_versions
+  run:
+    name: Runs the test suite against all supported python versions.
+ no_output_timeout: 15m + command: | + set -x + tox + setup_pytest: &setup_pytest run: name: Set up local env @@ -64,7 +85,7 @@ pull_license: &pull_license jobs: static_analysis: docker: - - image: python:3.9 + - image: python:latest steps: - checkout - run: @@ -124,7 +145,16 @@ jobs: no_output_timeout: 15m command: | set -x - pytest test/test_single_node.py --endpoint http://localhost:5820 -s + pytest test/test_single_node.py -s + + - <<: *installs_supported_python_versions + - <<: *run_test_supported_python_versions + - run: + name: Runs the single_node_only_test against multiple python envs + no_output_timeout: 15m + command: | + set -x + tox -e single_node basic_test_cluster_mode: machine: @@ -161,7 +191,15 @@ jobs: no_output_timeout: 15m command: | set -x - pytest test/test_cluster.py --endpoint http://localhost:5820 -s + pytest test/test_cluster.py -s + + - <<: *installs_supported_python_versions + - run: + name: Runs the cluster node only tests against multiple python envs + no_output_timeout: 15m + command: | + set -x + tox -e cluster workflows: build_and_test: diff --git a/docker-compose.cluster.yml b/docker-compose.cluster.yml index 2d2c7bf..312ebc1 100644 --- a/docker-compose.cluster.yml +++ b/docker-compose.cluster.yml @@ -1,8 +1,4 @@ -# Note that circleci docker executor does not allow to volume mounting, hence the volume block can't be used here. -# https://support.circleci.com/hc/en-us/articles/360007324514-How-can-I-use-Docker-volume-mounting-on-CircleCI- -# For this reason, we are creating specific dockerfiles, and building the images from them, copying the files in the images -# instead of mounting them. - +--- version: "3.3" services: zoo1: diff --git a/docker-compose.single-node.yml b/docker-compose.single-node.yml index 02d841c..a026ec6 100644 --- a/docker-compose.single-node.yml +++ b/docker-compose.single-node.yml @@ -1,8 +1,4 @@ -# Note that circleci docker executor does not allow to volume mounting, hence the volume block can't be used here. -# https://support.circleci.com/hc/en-us/articles/360007324514-How-can-I-use-Docker-volume-mounting-on-CircleCI- -# For this reason, we are creating specific dockerfiles, and building the images from them, copying the files in the images -# instead of mounting them. 
- +--- version: "3.3" services: sd-single-node: diff --git a/test-requirements.txt b/test-requirements.txt index 371df08..f4b7df4 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -3,3 +3,4 @@ black==22.3.0 requests-mock==1.10.0 pylint==2.15.9 flake8==6.0.0 +tox==4.2.6 \ No newline at end of file diff --git a/tox.ini b/tox.ini new file mode 100644 index 0000000..c2b2159 --- /dev/null +++ b/tox.ini @@ -0,0 +1,21 @@ +[tox] +envlist = + py3{11,10,9,8} + +[testenv] +deps = + -r {toxinidir}/test-requirements.txt +commands = + pytest test/test_admin_basic.py test/test_connection.py test/test_server_admin.py test/test_unit.py test/test_utils.py -s + +[testenv:cluster] +deps = + -r {toxinidir}/test-requirements.txt +commands = + pytest test/test_cluster.py -s + +[testenv:single_node] +deps = + -r {toxinidir}/test-requirements.txt +commands = + pytest test/test_single_node.py -s \ No newline at end of file From 73e6edb9014585a0726e6edceeb60cf0cb352973 Mon Sep 17 00:00:00 2001 From: Simon Cardenas Date: Wed, 11 Jan 2023 15:00:57 -0300 Subject: [PATCH 8/8] Release 0.13.0 --- setup.cfg | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/setup.cfg b/setup.cfg index d693d13..9e7deb8 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,6 +1,6 @@ [metadata] name = pystardog -version = 0.12.0 +version = 0.13.0 author = Stardog Union author_email = support@stardog.com description = Use Stardog with Python!
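
[Aside, not part of the series: with the `stardog/__init__.py` rework shipped in 0.13.0, the package exposes its modules rather than re-exported classes, so client code imports them the way the updated tests do. A usage sketch; the endpoint, credentials, and database name below are illustrative, and the database is assumed to exist:

    from stardog import admin, connection, content

    # Admin takes the endpoint and credentials positionally, as in test_unit.py.
    sd_admin = admin.Admin("http://localhost:5820", "admin", "admin")
    assert sd_admin.alive()

    with connection.Connection(
        "mydb", endpoint="http://localhost:5820", username="admin", password="admin"
    ) as conn:
        conn.begin()
        conn.add(content.Raw("<urn:a> <urn:b> <urn:c> .", "text/turtle"))
        conn.commit()
]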