
Commit

#1123 reduce nesting, fix iteration bug
gabrielwol committed Jan 2, 2025

Verified: This commit was created on GitHub.com and signed with GitHub's verified signature.
1 parent b0b4033 commit 9bfb2f5
Showing 1 changed file with 18 additions and 29 deletions: gis/gccview/gcc_puller_functions.py
@@ -373,12 +373,7 @@ def find_limit(return_json):
     keep_adding : Boolean
         boolean 'keep_adding' indicating if last query returned all rows in the layer
     """
-
-    if return_json.get('exceededTransferLimit', False) == True:
-        keep_adding = True
-    else:
-        keep_adding = False
-    return keep_adding
+    return return_json.get('exceededTransferLimit', False)
 
 def insert_data(output_table, insert_column, return_json, schema_name, con, is_audited, is_partitioned):
     """
@@ -585,37 +580,31 @@ def get_layer(mapserver_n, layer_id, schema_name, is_audited, cred = None, con =
         return
     #--------------------------------
     keep_adding = True
-    counter = 0
+    total = 0
     #--------------------------------
     if is_audited and primary_key is None:
         LOGGER.error("Audited tables should have a primary key.")
     if not(is_audited) and primary_key is not None:
         LOGGER.error("Non-audited tables do not use the primary key.")
     #--------------------------------
-    while keep_adding == True:
-        if counter == 0:
-            return_json = get_data(mapserver, layer_id)
-            if is_audited:
-                (insert_column, excluded_column) = create_audited_table(output_table, return_json, schema_name, primary_key, con)
-            elif is_partitioned:
-                (insert_column, output_table) = create_partitioned_table(output_table, return_json, schema_name, con)
-            else:
-                insert_column = create_table(output_table, return_json, schema_name, con)
-            features = return_json['features']
-            record_max=(len(features))
-            max_number = record_max
-        else:
-            return_json = get_data(mapserver, layer_id, max_number = max_number, record_max = record_max)
-
-        insert_data(output_table, insert_column, return_json, schema_name, con, is_audited, is_partitioned)
+    #get first data pull (no offset), create tables.
+    return_json = get_data(mapserver, layer_id)
+    if is_audited:
+        (insert_column, excluded_column) = create_audited_table(output_table, return_json, schema_name, primary_key, con)
+    elif is_partitioned:
+        (insert_column, output_table) = create_partitioned_table(output_table, return_json, schema_name, con)
+    else:
+        insert_column = create_table(output_table, return_json, schema_name, con)
 
-        counter += 1
-        keep_adding = find_limit(return_json)
-
+    while keep_adding:
+        insert_data(output_table, insert_column, return_json, schema_name, con, is_audited, is_partitioned)
+        record_count = len(return_json['features'])
+        total += record_count
+        keep_adding = find_limit(return_json) #checks if all records fetched
         if keep_adding:
-            max_number += record_max
-        else:
-            LOGGER.info('All records from [mapserver: %s, layerID: %d] have been inserted into %s', mapserver, layer_id, output_table)
+            #get next batch using offset (max_number)
+            return_json = get_data(mapserver, layer_id, max_number = total, record_max = record_count)
+    LOGGER.info('%s records from [mapserver: %s, layerID: %d] have been inserted into %s', total, mapserver, layer_id, output_table)
 
     if is_audited:
         try:
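
Note: after this change, get_layer does one unpaginated pull that creates the destination table and supplies the first batch, then loops: insert the batch in hand, tally the rows, and only fetch the next batch when find_limit reports a truncated response. This also appears to be the iteration fix named in the commit title: the old loop advanced max_number by a fixed record_max taken from the first batch, while the new loop offsets each request by total, the number of rows actually received so far. A self-contained sketch of the pattern, where FAKE_ROWS, PAGE, and the stubbed get_data are assumptions for illustration, not the module's real API:

# Stand-alone sketch of the fetch/insert loop above; stubs replace the
# real ArcGIS REST call (get_data) and database write (insert_data).
FAKE_ROWS = list(range(2500))
PAGE = 1000  # rows the fake server returns per request

def get_data(offset=0):
    batch = FAKE_ROWS[offset:offset + PAGE]
    return {'features': batch,
            'exceededTransferLimit': offset + PAGE < len(FAKE_ROWS)}

def find_limit(return_json):
    return return_json.get('exceededTransferLimit', False)

inserted = []  # stands in for the database table
total = 0
return_json = get_data()  # first pull, no offset
keep_adding = True
while keep_adding:
    inserted.extend(return_json['features'])  # insert_data stand-in
    total += len(return_json['features'])
    keep_adding = find_limit(return_json)
    if keep_adding:
        return_json = get_data(offset=total)  # next batch at the running total
assert total == len(FAKE_ROWS) == len(inserted)
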
