import os
import yaml
import json
import time
import random
import requests

from validate import send_cypher_query, send_trapi_query


def quick_jsonl_file_iterator(json_file):
    """Yield one parsed JSON object per line of a JSONL file.

    Unparseable lines yield an empty dict (after printing a warning) so a
    single bad line does not abort an entire performance run; callers are
    expected to skip falsy items.

    :param json_file: path to a JSON-lines file
    """
    with open(json_file, 'r', encoding='utf-8') as fp:
        for line_number, line in enumerate(fp, start=1):
            try:
                yield json.loads(line)
            except json.decoder.JSONDecodeError:
                # surface the bad line instead of silently dropping it
                print(f'Warning: skipping invalid JSON on line {line_number} of {json_file}')
                yield {}


def run_performance_analysis(deployments_to_validate=None, performance_spec=None, iterations=3):
    """Send timed TRAPI queries to deployed platers and record the durations.

    :param deployments_to_validate: list of deployment environment names to
        include; falsy means every environment in deployment_spec.yaml
    :param performance_spec: mapping of plater id -> {"files": [jsonl paths],
        "queries": optional list of query names to restrict the run to}
    :param iterations: number of times each query is sent (default 3)

    Per-iteration success durations and error strings are written as JSON to
    a randomly named file under ./performance_results/.
    """
    if performance_spec is None:
        # previously None crashed on .items(); treat as "nothing to run"
        performance_spec = {}

    graph_deployment_spec_path = os.path.join(os.path.dirname(__file__), 'deployment_spec.yaml')
    with open(graph_deployment_spec_path) as graph_deployment_spec_file:
        deployment_spec = yaml.safe_load(graph_deployment_spec_file)
    plater_performance_results = {}
    for deployment in deployment_spec['deployments']:
        deployment_env = deployment['deployment_environment']
        automat_url = deployment['automat_url']
        if not deployments_to_validate or deployment_env in deployments_to_validate:
            print(f'Running performance analysis for environment: {deployment_env}')
            for plater, query_details in performance_spec.items():
                if plater not in plater_performance_results:
                    plater_performance_results[plater] = {}
                plater_performance_results[plater][deployment_env] = {}
                # local deployments are hit directly; otherwise the plater id is a url path segment
                url = automat_url + plater + "/" if "localhost" not in automat_url else automat_url
                query_files = query_details["files"]
                queries = query_details.get("queries")
                for q_file in query_files:
                    for performance_query in quick_jsonl_file_iterator(q_file):
                        if not performance_query:
                            continue
                        # pop so the name is not sent as part of the TRAPI payload;
                        # tolerate queries missing a "name" key instead of raising KeyError
                        query_name = performance_query.pop('name', None)
                        if query_name is None:
                            print(f'Warning: query in {q_file} has no "name" field, skipping.')
                            continue
                        if queries and query_name not in queries:
                            continue
                        query_results = {'success_duration': [],
                                         'errors': []}
                        plater_performance_results[plater][deployment_env][query_name] = query_results
                        for i in range(iterations):
                            print(f'Sending query {query_name} to {deployment_env}: {plater}, iteration {i+1}')
                            start_time = time.time()
                            try:
                                trapi_response = send_trapi_query(url,
                                                                  performance_query,
                                                                  profile=False,
                                                                  validate=False)
                                num_results = len(trapi_response['message']['results'])
                                duration = time.time() - start_time
                                print(f'Got back {num_results} in {duration}.')
                                query_results['success_duration'].append(duration)
                            except requests.exceptions.RequestException as e:
                                # RequestException also covers connection errors and timeouts,
                                # not just the HTTPError raised for bad status codes
                                duration = time.time() - start_time
                                print(f'Error occurred after {duration} seconds: {e}.')
                                query_results['errors'].append(str(e))

                        successes = query_results['success_duration']
                        if successes:
                            average = sum(successes) / len(successes)
                            print(f'Average time for {query_name} to {deployment_env}, {plater}: {average}')
                        else:
                            # previously this divided by zero when every iteration failed
                            print(f'No successful iterations for {query_name} to {deployment_env}, {plater}.')

    # the results directory is git-ignored and may not exist yet
    os.makedirs('./performance_results', exist_ok=True)
    with open(f'./performance_results/performance_analysis_results_{random.randrange(100000)}.json',
              'w') as p_out:
        p_out.write(json.dumps(plater_performance_results, indent=4))


if __name__ == '__main__':

    # environments = ['exp', 'dev', 'robokop']
    environments = ['robokop']

    performance_spec = {
        "robokopkg": {"files": ["./performance_queries/robokopkg_performance_queries.jsonl"],
                      "queries": ["gene_to_chemical_qualifier_40"]}
        # "hmdb": {"files": ["./performance_queries/hmdb_performance_queries.jsonl"]}
    }

    # to run for only certain environments
    run_performance_analysis(environments, performance_spec, iterations=3)

    # or all of them
    # run_performance_analysis()
+{"name": "gene_to_chemical_5", "message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_10", "message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_20", 
"message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698","NCBIGene:7099","NCBIGene:1565","NCBIGene:90134","NCBIGene:151","NCBIGene:148","NCBIGene:5150","NCBIGene:151306","NCBIGene:5142","NCBIGene:6532","NCBIGene:4306"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_30", "message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698","NCBIGene:7099","NCBIGene:1565","NCBIGene:90134","NCBIGene:151","NCBIGene:148","NCBIGene:5150","NCBIGene:151306","NCBIGene:5142","NCBIGene:6532","NCBIGene:4306", "NCBIGene:53637","NCBIGene:14396","NCBIGene:367","NCBIGene:1576","NCBIGene:3569","NCBIGene:2904","NCBIGene:338442","NCBIGene:2902","NCBIGene:59340","NCBIGene:5144"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_40", 
"message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698","NCBIGene:7099","NCBIGene:1565","NCBIGene:90134","NCBIGene:151","NCBIGene:148","NCBIGene:5150","NCBIGene:151306","NCBIGene:5142","NCBIGene:6532","NCBIGene:4306", "NCBIGene:53637","NCBIGene:14396","NCBIGene:367","NCBIGene:1576","NCBIGene:3569","NCBIGene:2904","NCBIGene:338442","NCBIGene:2902","NCBIGene:59340","NCBIGene:5144","NCBIGene:6531","NCBIGene:140","NCBIGene:2556","NCBIGene:3269","NCBIGene:3766","NCBIGene:774","NCBIGene:1813","NCBIGene:1903","NCBIGene:24947","NCBIGene:135"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_qualifier_1", "message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[{"qualifier_set":[{"qualifier_type_id":"biolink:object_aspect_qualifier","qualifier_value":"activity_or_abundance"},{"qualifier_type_id":"biolink:object_direction_qualifier","qualifier_value":"decreased"}]}]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_qualifier_5", 
"message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[{"qualifier_set":[{"qualifier_type_id":"biolink:object_aspect_qualifier","qualifier_value":"activity_or_abundance"},{"qualifier_type_id":"biolink:object_direction_qualifier","qualifier_value":"decreased"}]}]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_qualifier_10", "message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[{"qualifier_set":[{"qualifier_type_id":"biolink:object_aspect_qualifier","qualifier_value":"activity_or_abundance"},{"qualifier_type_id":"biolink:object_direction_qualifier","qualifier_value":"decreased"}]}]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_qualifier_20", 
"message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698","NCBIGene:7099","NCBIGene:1565","NCBIGene:90134","NCBIGene:151","NCBIGene:148","NCBIGene:5150","NCBIGene:151306","NCBIGene:5142","NCBIGene:6532","NCBIGene:4306"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[{"qualifier_set":[{"qualifier_type_id":"biolink:object_aspect_qualifier","qualifier_value":"activity_or_abundance"},{"qualifier_type_id":"biolink:object_direction_qualifier","qualifier_value":"decreased"}]}]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_qualifier_30", 
"message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698","NCBIGene:7099","NCBIGene:1565","NCBIGene:90134","NCBIGene:151","NCBIGene:148","NCBIGene:5150","NCBIGene:151306","NCBIGene:5142","NCBIGene:6532","NCBIGene:4306","NCBIGene:53637","NCBIGene:14396","NCBIGene:367","NCBIGene:1576","NCBIGene:3569","NCBIGene:2904","NCBIGene:338442","NCBIGene:2902","NCBIGene:59340","NCBIGene:5144"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[{"qualifier_set":[{"qualifier_type_id":"biolink:object_aspect_qualifier","qualifier_value":"activity_or_abundance"},{"qualifier_type_id":"biolink:object_direction_qualifier","qualifier_value":"decreased"}]}]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} +{"name": "gene_to_chemical_qualifier_40", 
"message":{"query_graph":{"nodes":{"source":{"categories":["biolink:ChemicalEntity"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]},"i":{"ids":["NCBIGene:6868","NCBIGene:1557","NCBIGene:3738","NCBIGene:5141","NCBIGene:3754","NCBIGene:2908","NCBIGene:2350","NCBIGene:6573","NCBIGene:1723","NCBIGene:8698","NCBIGene:7099","NCBIGene:1565","NCBIGene:90134","NCBIGene:151","NCBIGene:148","NCBIGene:5150","NCBIGene:151306","NCBIGene:5142","NCBIGene:6532","NCBIGene:4306","NCBIGene:53637","NCBIGene:14396","NCBIGene:367","NCBIGene:1576","NCBIGene:3569","NCBIGene:2904","NCBIGene:338442","NCBIGene:2902","NCBIGene:59340","NCBIGene:5144","NCBIGene:6531","NCBIGene:140","NCBIGene:2556","NCBIGene:3269","NCBIGene:3766","NCBIGene:774","NCBIGene:1813","NCBIGene:1903","NCBIGene:24947","NCBIGene:135"],"categories":["biolink:Gene","biolink:Protein"],"set_interpretation":"BATCH","constraints":[],"member_ids":[]}},"edges":{"edge_1":{"subject":"source","object":"i","predicates":["biolink:affects"],"attribute_constraints":[],"qualifier_constraints":[{"qualifier_set":[{"qualifier_type_id":"biolink:object_aspect_qualifier","qualifier_value":"activity_or_abundance"},{"qualifier_type_id":"biolink:object_direction_qualifier","qualifier_value":"decreased"}]}]}}}},"bypass_cache":true,"submitter":"infores:aragorn"} + + diff --git a/deploy/validate.py b/deploy/validate.py index 324adc3..9704029 100644 --- a/deploy/validate.py +++ b/deploy/validate.py @@ -2,6 +2,7 @@ import os import yaml + def get_metadata(url): # get the metadata and check the version, get the number of nodes that should be in the graph metadata_response = requests.get(f'{url}metadata') @@ -11,8 +12,19 @@ def get_metadata(url): return metadata -def make_cypher_call(url, cypher): - # query the graph with cypher +def send_trapi_query(url, trapi_query, profile=False, validate=False): + trapi_url = f'{url}query?' 
+ if profile: + trapi_url += 'profile=true' + elif validate: + trapi_url += 'validate=true' + trapi_query_response = requests.post(trapi_url, json=trapi_query) + if trapi_query_response.status_code != 200: + trapi_query_response.raise_for_status() + return trapi_query_response.json() + + +def send_cypher_query(url, cypher): cypher_query_payload = {"query": cypher} cypher_response = requests.post(f'{url}cypher', json=cypher_query_payload) if cypher_response.status_code != 200: @@ -53,16 +65,13 @@ def validate_plater(url, expected_version, expected_plater_version, expected_tra results['expected_number_of_edges'] = expected_number_of_edges # query the graph with cypher to check if the neo4j instance is up and has the right number of nodes - cypher_query_payload = {"query": f"MATCH (n) RETURN count(n)"} - cypher_response = requests.post(f'{url}cypher', json=cypher_query_payload) - if cypher_response.status_code != 200: - try: - cypher_response.raise_for_status() - except requests.exceptions.HTTPError as e: - results['validation_errors'].append(f'Running cypher query failed: {str(e)}') - return results try: - number_of_nodes = cypher_response.json()['results'][0]['data'][0]['row'][0] + cypher_response = send_cypher_query(url, "MATCH (n) RETURN count(n)") + except requests.exceptions.HTTPError as e: + results['validation_errors'].append(f'Running cypher query failed: {str(e)}') + return results + try: + number_of_nodes = cypher_response['results'][0]['data'][0]['row'][0] results['actual_number_of_nodes'] = number_of_nodes except KeyError: results['validation_errors'].append(f'Cypher query returned an invalid result.') @@ -74,19 +83,15 @@ def validate_plater(url, expected_version, expected_plater_version, expected_tra error_message = f'Metadata said there should be {expected_number_of_nodes} nodes, ' \ f'but cypher query returned: {number_of_nodes}.' 
results['validation_errors'].append(error_message) - return results # query the graph with cypher to check if the neo4j instance has the right number of edges - cypher_query_payload = {"query": f"MATCH (n)-[r]->(m) RETURN count(r)"} - cypher_response = requests.post(f'{url}cypher', json=cypher_query_payload) - if cypher_response.status_code != 200: - try: - cypher_response.raise_for_status() - except requests.exceptions.HTTPError as e: - results['validation_errors'].append(f'Running cypher query failed: {str(e)}') - return results try: - number_of_edges = cypher_response.json()['results'][0]['data'][0]['row'][0] + cypher_response = send_cypher_query(url, "MATCH (n)-[r]->(m) RETURN count(r)") + except requests.exceptions.HTTPError as e: + results['validation_errors'].append(f'Running cypher query failed: {str(e)}') + return results + try: + number_of_edges = cypher_response['results'][0]['data'][0]['row'][0] results['actual_number_of_edges'] = number_of_edges except KeyError: results['validation_errors'].append(f'Cypher query returned an invalid result.') @@ -98,10 +103,13 @@ def validate_plater(url, expected_version, expected_plater_version, expected_tra error_message = f'Metadata said there should be {expected_number_of_edges} edges, ' \ f'but cypher query returned: {number_of_edges}.' 
results['validation_errors'].append(error_message) - return results if run_warmup: - make_cypher_call(url, 'CALL apoc.warmup.run(True, True, True)') + try: + send_cypher_query(url, 'CALL apoc.warmup.run(True, True, True)') + except requests.exceptions.HTTPError as e: + results['validation_errors'].append(f'Running warmup cypher query failed: {str(e)}') + return results # get the open api spec and the example trapi query from it openapi_response = requests.get(f'{url}openapi.json') @@ -116,36 +124,31 @@ def validate_plater(url, expected_version, expected_plater_version, expected_tra openapi_plater_version = openapi_spec['info']['version'] if openapi_plater_version != expected_plater_version: results['validation_errors'].append(f'Expected plater version {expected_plater_version} but openapi says {openapi_plater_version}') - return results openapi_trapi_version = openapi_spec['info']['x-trapi']['version'] if openapi_trapi_version != expected_trapi_version: results['validation_errors'].append(f'Expected TRAPI version {expected_trapi_version} but openapi says {openapi_trapi_version}') - return results example_trapi_query = openapi_spec['paths']['/query']['post']['requestBody']['content']['application/json']['example'] # send the example trapi query and make sure it works - trapi_query_response = requests.post(f'{url}query', json=example_trapi_query) - if trapi_query_response.status_code != 200: - try: - cypher_response.raise_for_status() - except requests.exceptions.HTTPError as e: - results['validation_errors'].append(f'Sending a trapi query failed: {str(e)}') - return results - trapi_query_results = trapi_query_response.json()['message'] + try: + trapi_query_response = send_trapi_query(url, example_trapi_query) + except requests.exceptions.HTTPError as e: + results['validation_errors'].append(f'Sending a trapi query failed: {str(e)}') + return results + trapi_query_results = trapi_query_response['message'] if 'knowledge_graph' not in trapi_query_results or 'results' 
not in trapi_query_results: results['validation_errors'].append(f'Trapi query results were poorly formatted: {trapi_query_results}') return results if len(trapi_query_results['results']) == 0: results['validation_errors'].append(f'Example trapi query did not yield any results.') - return results - results['valid'] = True + + results['valid'] = True if len(results['validation_errors']) == 0 else False return results -def run_validation(deployments_to_validate=None): - deployments_to_validate = deployments_to_validate if deployments_to_validate else None +def run_validation(deployments_to_validate=None, run_warmup=False): everything_is_good = True graph_deployment_spec_path = os.path.join(os.path.dirname(__file__), 'deployment_spec.yaml') with open(graph_deployment_spec_path) as graph_deployment_spec_file: @@ -161,11 +164,12 @@ def run_validation(deployments_to_validate=None): trapi_version = deployment['trapi_version'] plater_version = deployment['plater_version'] for plater_id, graph_version in deployment['platers'].items(): + print(f"Validating plater {automat_url}{plater_id}.") validation_results = validate_plater(f'{automat_url}{plater_id}/', graph_version, plater_version, trapi_version, - run_warmup=False) + run_warmup=run_warmup) validation_errors = "\n".join(validation_results['validation_errors']) if validation_errors: everything_is_good = False @@ -181,7 +185,10 @@ def run_validation(deployments_to_validate=None): if __name__ == '__main__': # to run for only certain environments - # run_validation(['dev', 'robokop']) + run_validation(['robokop'], run_warmup=True) # or all of them - run_validation() + # run_validation() + + +