From 9bfb2f5f2b572c936cc12c2b1ee252a94013682b Mon Sep 17 00:00:00 2001
From: Gabe Wolofsky <80077912+gabrielwol@users.noreply.github.com>
Date: Thu, 2 Jan 2025 18:10:34 -0500
Subject: [PATCH] #1123 reduce nesting, fix iteration bug

---
 gis/gccview/gcc_puller_functions.py | 47 +++++++++++------------------
 1 file changed, 18 insertions(+), 29 deletions(-)

diff --git a/gis/gccview/gcc_puller_functions.py b/gis/gccview/gcc_puller_functions.py
index 98a7461f7..da890efd1 100644
--- a/gis/gccview/gcc_puller_functions.py
+++ b/gis/gccview/gcc_puller_functions.py
@@ -373,12 +373,7 @@ def find_limit(return_json):
     keep_adding : Boolean
         boolean 'keep_adding' indicating if last query returned all rows in the layer
     """
-
-    if return_json.get('exceededTransferLimit', False) == True:
-        keep_adding = True
-    else:
-        keep_adding = False
-    return keep_adding
+    return return_json.get('exceededTransferLimit', False)
 
 def insert_data(output_table, insert_column, return_json, schema_name, con, is_audited, is_partitioned):
     """
@@ -585,37 +580,31 @@ def get_layer(mapserver_n, layer_id, schema_name, is_audited, cred = None, con =
         return
     #--------------------------------
     keep_adding = True
-    counter = 0
+    total = 0
     #--------------------------------
     if is_audited and primary_key is None:
         LOGGER.error("Audited tables should have a primary key.")
     if not(is_audited) and primary_key is not None:
         LOGGER.error("Non-audited tables do not use the primary key.")
     #--------------------------------
-    while keep_adding == True:
-        if counter == 0:
-            return_json = get_data(mapserver, layer_id)
-            if is_audited:
-                (insert_column, excluded_column) = create_audited_table(output_table, return_json, schema_name, primary_key, con)
-            elif is_partitioned:
-                (insert_column, output_table) = create_partitioned_table(output_table, return_json, schema_name, con)
-            else:
-                insert_column = create_table(output_table, return_json, schema_name, con)
-            features = return_json['features']
-            record_max=(len(features))
-            max_number = record_max
-        else:
-            return_json = get_data(mapserver, layer_id, max_number = max_number, record_max = record_max)
-
-        insert_data(output_table, insert_column, return_json, schema_name, con, is_audited, is_partitioned)
+    #get first data pull (no offset), create tables.
+    return_json = get_data(mapserver, layer_id)
+    if is_audited:
+        (insert_column, excluded_column) = create_audited_table(output_table, return_json, schema_name, primary_key, con)
+    elif is_partitioned:
+        (insert_column, output_table) = create_partitioned_table(output_table, return_json, schema_name, con)
+    else:
+        insert_column = create_table(output_table, return_json, schema_name, con)
 
-        counter += 1
-        keep_adding = find_limit(return_json)
-
+    while keep_adding:
+        insert_data(output_table, insert_column, return_json, schema_name, con, is_audited, is_partitioned)
+        record_count = len(return_json['features'])
+        total += record_count
+        keep_adding = find_limit(return_json) #checks if all records fetched
         if keep_adding:
-            max_number += record_max
-        else:
-            LOGGER.info('All records from [mapserver: %s, layerID: %d] have been inserted into %s', mapserver, layer_id, output_table)
+            #get next batch using offset (max_number)
+            return_json = get_data(mapserver, layer_id, max_number = total, record_max = record_count)
+    LOGGER.info('%s records from [mapserver: %s, layerID: %d] have been inserted into %s', total, mapserver, layer_id, output_table)
     if is_audited:
         try:
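
Reviewer note, not part of the patch: the rewritten loop advances each follow-up pull by the running total of records actually received, whereas the old loop advanced by the size of the first batch (max_number += record_max) and could skip or re-read rows whenever the server returned a short batch. Below is a minimal sketch of the same pagination pattern against an ArcGIS REST layer that supports pagination via resultOffset; fetch_all_features, base_url, and the query parameters are hypothetical illustrations for this sketch, not names from this repo.

    import requests

    def fetch_all_features(base_url, layer_id):
        """Yield every feature from an ArcGIS REST layer, batch by batch."""
        url = f"{base_url}/{layer_id}/query"
        params = {"where": "1=1", "outFields": "*", "f": "json"}
        total = 0
        keep_adding = True
        # First pull has no offset, like the patch's get_data(mapserver, layer_id).
        return_json = requests.get(url, params=params).json()
        while keep_adding:
            features = return_json["features"]
            total += len(features)
            yield from features
            # exceededTransferLimit is True while more rows remain on the server.
            keep_adding = return_json.get("exceededTransferLimit", False)
            if keep_adding:
                # Offset by the running total of records received, as the patched
                # loop does with max_number=total, not by a fixed first-batch size.
                params["resultOffset"] = total
                return_json = requests.get(url, params=params).json()

Accumulating the offset from actual batch sizes also lets the loop log an exact record count at the end, which is what the new LOGGER.info call reports.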