From 9cf5f249b2142d89fdaf8ad3bfbfffcb337d634c Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Wed, 21 Sep 2022 14:07:49 +0545 Subject: [PATCH 001/153] added flower celery and redis --- API/api_worker.py | 120 ++++++++++++++++++++++++++++++++++++++++++++ API/raw_data.py | 124 ++++------------------------------------------ requirements.txt | 5 +- 3 files changed, 133 insertions(+), 116 deletions(-) create mode 100644 API/api_worker.py diff --git a/API/api_worker.py b/API/api_worker.py new file mode 100644 index 00000000..9cceb7cb --- /dev/null +++ b/API/api_worker.py @@ -0,0 +1,120 @@ +import os +import time +import pathlib +import orjson +import shutil +from datetime import datetime as dt +from uuid import uuid4 +import zipfile +from celery import Celery +from src.galaxy.config import config +from fastapi.responses import JSONResponse +from src.galaxy.query_builder.builder import format_file_name_str +from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType +from src.galaxy.app import RawData, S3FileTransfer +from src.galaxy.config import use_s3_to_upload, logger as logging, config + +celery = Celery(__name__) +celery.conf.broker_url = config.get( + "CELERY", "CELERY_BROKER_URL", fallback="redis://localhost:6379" +) +celery.conf.result_backend = config.get( + "CELERY", "CELERY_RESULT_BACKEND", fallback="redis://localhost:6379" +) # using redis as backend , make sure you have redis server started on your system on port 6379 + + +@celery.task(name="process_raw_data") +def process_raw_data(request, params, background_tasks): + start_time = dt.now() + if ( + params.output_type is None + ): # if no ouput type is supplied default is geojson output + params.output_type = RawDataOutputType.GEOJSON.value + + # unique id for zip file and geojson for each export + if params.file_name: + # need to format string from space to _ because it is filename , may be we need to filter special character as well later on + formatted_file_name = format_file_name_str(params.file_name) + # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(uuid4())}" + exportname = f"""{formatted_file_name}_{str(uuid4())}_{params.output_type}""" # disabled date for now + + else: + # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(uuid4())}" + exportname = f"Raw_Export_{str(uuid4())}_{params.output_type}" + + logging.info("Request %s received", exportname) + + dump_temp_file, geom_area, root_dir_file = RawData(params).extract_current_data( + exportname + ) + path = f"""{root_dir_file}{exportname}/""" + + if os.path.exists(path) is False: + return JSONResponse(status_code=400, content={"Error": "Request went too big"}) + + logging.debug("Zip Binding Started !") + # saving file in temp directory instead of memory so that zipping file will not eat memory + zip_temp_path = f"""{root_dir_file}{exportname}.zip""" + zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) + + directory = pathlib.Path(path) + for file_path in directory.iterdir(): + zf.write(file_path, arcname=file_path.name) + + # Compressing geojson file + zf.writestr("clipping_boundary.geojson", orjson.dumps(dict(params.geometry))) + + zf.close() + logging.debug("Zip Binding Done !") + inside_file_size = 0 + for temp_file in dump_temp_file: + # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore + if os.path.exists(temp_file): + inside_file_size += os.path.getsize(temp_file) + + # remove the file that are just binded to zip file , we no longer need to store it + 
background_tasks.add_task(remove_file, path) + + # check if download url will be generated from s3 or not from config + if use_s3_to_upload: + file_transfer_obj = S3FileTransfer() + download_url = file_transfer_obj.upload(zip_temp_path, exportname) + else: + + # getting from config in case api and frontend is not hosted on same url + client_host = config.get( + "API_CONFIG", + "api_host", + fallback=f"""{request.url.scheme}://{request.client.host}""", + ) + client_port = config.get("API_CONFIG", "api_port", fallback=8000) + + if client_port: + download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + else: + download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + + # getting file size of zip , units are in bytes converted to mb in response + zip_file_size = os.path.getsize(zip_temp_path) + response_time = dt.now() - start_time + response_time_str = str(response_time) + logging.info( + f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}" + ) + + return { + "download_url": download_url, + "file_name": exportname, + "response_time": response_time_str, + "query_area": f"""{geom_area} Sq Km """, + "binded_file_size": f"""{round(inside_file_size/1000000)} MB""", + "zip_file_size_bytes": {zip_file_size}, + } + + +def remove_file(path: str) -> None: + """Used for removing temp file dir and its all content after zip file is delivered to user""" + try: + shutil.rmtree(path) + except OSError as ex: + logging.error("Error: %s - %s.", ex.filename, ex.strerror) diff --git a/API/raw_data.py b/API/raw_data.py index 6b8d4fb5..7ac9c3da 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -25,6 +25,7 @@ import time import zipfile import requests + # from .auth import login_required import pathlib import shutil @@ -34,12 +35,14 @@ from fastapi import APIRouter, Request from fastapi_versioning import version from fastapi.responses import JSONResponse + # from fastapi import APIRouter, Depends, Request from src.galaxy.query_builder.builder import format_file_name_str from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType from src.galaxy.app import RawData, S3FileTransfer from src.galaxy.config import use_s3_to_upload, logger as logging, config +from api_worker import process_raw_data router = APIRouter(prefix="/raw-data") @@ -52,7 +55,9 @@ @router.post("/current-snapshot/") @version(1) -def get_current_data(params: RawDataCurrentParams, background_tasks: BackgroundTasks, request: Request): +def get_current_data( + params: RawDataCurrentParams, background_tasks: BackgroundTasks, request: Request +): """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features Args: @@ -207,84 +212,9 @@ def get_current_data(params: RawDataCurrentParams, background_tasks: BackgroundT } """ -# def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes 
it restrict login - start_time = dt.now() - if params.output_type is None: # if no ouput type is supplied default is geojson output - params.output_type = RawDataOutputType.GEOJSON.value - - # unique id for zip file and geojson for each export - if params.file_name: - # need to format string from space to _ because it is filename , may be we need to filter special character as well later on - formatted_file_name = format_file_name_str(params.file_name) - # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(uuid4())}" - exportname = f"""{formatted_file_name}_{str(uuid4())}_{params.output_type}""" # disabled date for now - - else: - # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(uuid4())}" - exportname = f"Raw_Export_{str(uuid4())}_{params.output_type}" - - logging.info("Request %s received", exportname) - - dump_temp_file, geom_area, root_dir_file = RawData( - params).extract_current_data(exportname) - path = f"""{root_dir_file}{exportname}/""" - - if os.path.exists(path) is False: - return JSONResponse( - status_code=400, - content={"Error": "Request went too big"} - ) - - logging.debug('Zip Binding Started !') - # saving file in temp directory instead of memory so that zipping file will not eat memory - zip_temp_path = f"""{root_dir_file}{exportname}.zip""" - zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) - - directory = pathlib.Path(path) - for file_path in directory.iterdir(): - zf.write(file_path, arcname=file_path.name) - - # Compressing geojson file - zf.writestr("clipping_boundary.geojson", - orjson.dumps(dict(params.geometry))) - - zf.close() - logging.debug('Zip Binding Done !') - inside_file_size = 0 - for temp_file in dump_temp_file: - # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore - if os.path.exists(temp_file): - inside_file_size += os.path.getsize(temp_file) - - # remove the file that are just binded to zip file , we no longer need to store it - background_tasks.add_task(remove_file, path) - - # check if download url will be generated from s3 or not from config - if use_s3_to_upload: - file_transfer_obj = S3FileTransfer() - download_url = file_transfer_obj.upload(zip_temp_path, exportname) - # watches the status code of the link provided and deletes the file if it is 200 - background_tasks.add_task(watch_s3_upload, download_url, zip_temp_path) - else: - - # getting from config in case api and frontend is not hosted on same url - client_host = config.get( - "API_CONFIG", "api_host", fallback=f"""{request.url.scheme}://{request.client.host}""") - client_port = config.get("API_CONFIG", "api_port", fallback=8000) - - if client_port: - download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! - else: - download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! 
- - # getting file size of zip , units are in bytes converted to mb in response - zip_file_size = os.path.getsize(zip_temp_path) - response_time = dt.now() - start_time - response_time_str = str(response_time) - logging.info( - f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") - - return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000)} MB""", "zip_file_size_bytes": {zip_file_size}} + # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login + task = process_raw_data(request, params, background_tasks) + return JSONResponse({"task_id": task.id}) @router.get("/status/") @@ -294,39 +224,3 @@ def check_current_db_status(): result = RawData().check_status() response = f"{result} ago" return {"last_updated": response} - - -def remove_file(path: str) -> None: - """Used for removing temp file dir and its all content after zip file is delivered to user - """ - try: - shutil.rmtree(path) - except OSError as ex: - logging.error("Error: %s - %s.", ex.filename, ex.strerror) - - -def watch_s3_upload(url: str, path: str) -> None: - """Watches upload of s3 either it is completed or not and removes the temp file after completion - - Args: - url (_type_): url generated by the script where data will be available - path (_type_): path where temp file is located at - """ - start_time = time.time() - remove_temp_file = True - check_call = requests.head(url).status_code - if check_call != 200: - logging.debug("Upload is not done yet waiting ...") - while check_call != 200: # check until status is not green - check_call = requests.head(url).status_code - if time.time() - start_time > 300: - logging.error( - "Upload time took more than 5 min , Killing watch : %s , URL : %s", path, url) - remove_temp_file = False # don't remove the file if upload fails - break - time.sleep(3) # check each 3 second - # once it is verfied file is uploaded finally remove the file - if remove_temp_file: - logging.debug( - "File is uploaded at %s , flushing out from %s", url, path) - os.unlink(path) diff --git a/requirements.txt b/requirements.txt index 033c0415..dc218d8e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -28,5 +28,8 @@ orjson==3.6.7 boto3==1.24.38 Fiona==1.8.21 fastapi-versioning==0.10.0 -#gdal and ogr2ogr is required on the machine to run rawdata endpoint +redis==4.3.4 +celery==5.2.7 +flower==1.2.0 +#gdal and ogr2ogr is required on the machine to run rawdata endpoint # gdal == 3.3.2 From 0a962569fc662e26b0390be4093eec7617168e11 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Wed, 21 Sep 2022 14:58:58 +0545 Subject: [PATCH 002/153] added background task --- API/api_worker.py | 6 +++--- API/raw_data.py | 31 +++++++++++++++---------------- requirements.txt | 2 +- 3 files changed, 19 insertions(+), 20 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 9cceb7cb..2cb4fefc 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -1,5 +1,4 @@ import os -import time import pathlib import orjson import shutil @@ -10,7 +9,7 @@ from src.galaxy.config import config from fastapi.responses import JSONResponse from src.galaxy.query_builder.builder import format_file_name_str -from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType +from src.galaxy.validation.models 
import RawDataOutputType from src.galaxy.app import RawData, S3FileTransfer from src.galaxy.config import use_s3_to_upload, logger as logging, config @@ -62,7 +61,8 @@ def process_raw_data(request, params, background_tasks): zf.write(file_path, arcname=file_path.name) # Compressing geojson file - zf.writestr("clipping_boundary.geojson", orjson.dumps(dict(params.geometry))) + zf.writestr("clipping_boundary.geojson", + orjson.dumps(dict(params.geometry))) zf.close() logging.debug("Zip Binding Done !") diff --git a/API/raw_data.py b/API/raw_data.py index 7ac9c3da..d68d92d6 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -19,29 +19,17 @@ """[Router Responsible for Raw data API ] """ -import os -from datetime import datetime as dt -from uuid import uuid4 -import time -import zipfile -import requests # from .auth import login_required -import pathlib -import shutil from starlette.background import BackgroundTasks -import orjson - from fastapi import APIRouter, Request from fastapi_versioning import version from fastapi.responses import JSONResponse # from fastapi import APIRouter, Depends, Request -from src.galaxy.query_builder.builder import format_file_name_str -from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType -from src.galaxy.app import RawData, S3FileTransfer - -from src.galaxy.config import use_s3_to_upload, logger as logging, config +from src.galaxy.validation.models import RawDataCurrentParams +from src.galaxy.app import RawData +from celery.result import AsyncResult from api_worker import process_raw_data router = APIRouter(prefix="/raw-data") @@ -214,7 +202,18 @@ def get_current_data( """ # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login task = process_raw_data(request, params, background_tasks) - return JSONResponse({"task_id": task.id}) + return JSONResponse({"task_id": task.id, "track_link": f"/current-snapshot/tasks/{task.id}/"}) + + +@router.get("/current-snapshot/tasks/{task_id}/") +def get_status(task_id): + task_result = AsyncResult(task_id) + result = { + "task_id": task_id, + "task_status": task_result.status, + "task_result": task_result.result + } + return JSONResponse(result) @router.get("/status/") diff --git a/requirements.txt b/requirements.txt index dc218d8e..4a04a025 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ sphinx==4.2.0 area==1.1.1 orjson==3.6.7 boto3==1.24.38 -Fiona==1.8.21 +# Fiona==1.8.21 fastapi-versioning==0.10.0 redis==4.3.4 celery==5.2.7 From 1f351ab15720a245947096003f637d8d726b9c58 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Wed, 21 Sep 2022 19:09:08 +0545 Subject: [PATCH 003/153] added pickle serializer --- API/api_worker.py | 7 +++++-- API/raw_data.py | 4 ++-- 2 files changed, 7 insertions(+), 4 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 2cb4fefc..8f734b0a 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -21,9 +21,12 @@ "CELERY", "CELERY_RESULT_BACKEND", fallback="redis://localhost:6379" ) # using redis as backend , make sure you have redis server started on your system on port 6379 +celery.conf.task_serializer = 'pickle' +celery.conf.result_serializer = 'pickle' +celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] @celery.task(name="process_raw_data") -def process_raw_data(request, params, background_tasks): +def process_raw_data(incoming_scheme, incoming_host, params, background_tasks): 
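+    """Celery task that runs the raw-data extraction and returns the download details.
+
+    incoming_scheme and incoming_host are the request scheme and host passed in as
+    plain strings; they are only used as a fallback when building the download URL
+    when api_host is not set in the config.
+    """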
start_time = dt.now() if ( params.output_type is None @@ -85,7 +88,7 @@ def process_raw_data(request, params, background_tasks): client_host = config.get( "API_CONFIG", "api_host", - fallback=f"""{request.url.scheme}://{request.client.host}""", + fallback=f"""{incoming_scheme}://{incoming_host}""", ) client_port = config.get("API_CONFIG", "api_port", fallback=8000) diff --git a/API/raw_data.py b/API/raw_data.py index d68d92d6..32897a24 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -30,7 +30,7 @@ from src.galaxy.validation.models import RawDataCurrentParams from src.galaxy.app import RawData from celery.result import AsyncResult -from api_worker import process_raw_data +from .api_worker import process_raw_data router = APIRouter(prefix="/raw-data") @@ -201,7 +201,7 @@ def get_current_data( """ # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login - task = process_raw_data(request, params, background_tasks) + task = process_raw_data.delay(request.url.scheme, request.client.host, params, background_tasks) return JSONResponse({"task_id": task.id, "track_link": f"/current-snapshot/tasks/{task.id}/"}) From 2dcc8a44ab9491d3e923a73290e1ce4aced40f5c Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Wed, 21 Sep 2022 20:35:35 +0545 Subject: [PATCH 004/153] fixed return error too --- API/api_worker.py | 13 +++++++------ API/raw_data.py | 9 +++++---- 2 files changed, 12 insertions(+), 10 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 8f734b0a..f48080e0 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -22,11 +22,12 @@ ) # using redis as backend , make sure you have redis server started on your system on port 6379 celery.conf.task_serializer = 'pickle' -celery.conf.result_serializer = 'pickle' +celery.conf.result_serializer = 'json' celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] + @celery.task(name="process_raw_data") -def process_raw_data(incoming_scheme, incoming_host, params, background_tasks): +def process_raw_data(incoming_scheme, incoming_host, params): start_time = dt.now() if ( params.output_type is None @@ -76,7 +77,7 @@ def process_raw_data(incoming_scheme, incoming_host, params, background_tasks): inside_file_size += os.path.getsize(temp_file) # remove the file that are just binded to zip file , we no longer need to store it - background_tasks.add_task(remove_file, path) + remove_file(path) # check if download url will be generated from s3 or not from config if use_s3_to_upload: @@ -109,9 +110,9 @@ def process_raw_data(incoming_scheme, incoming_host, params, background_tasks): "download_url": download_url, "file_name": exportname, "response_time": response_time_str, - "query_area": f"""{geom_area} Sq Km """, - "binded_file_size": f"""{round(inside_file_size/1000000)} MB""", - "zip_file_size_bytes": {zip_file_size}, + "query_area": f"{geom_area} Sq Km ", + "binded_file_size": f"{round(inside_file_size/1000000)} MB", + "zip_file_size_bytes": zip_file_size, } diff --git a/API/raw_data.py b/API/raw_data.py index 32897a24..5468ca82 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -30,7 +30,7 @@ from src.galaxy.validation.models import RawDataCurrentParams from src.galaxy.app import RawData from celery.result import AsyncResult -from .api_worker import process_raw_data +from .api_worker import process_raw_data, celery router = APIRouter(prefix="/raw-data") @@ -44,7 +44,7 @@ 
@router.post("/current-snapshot/") @version(1) def get_current_data( - params: RawDataCurrentParams, background_tasks: BackgroundTasks, request: Request + params: RawDataCurrentParams, request: Request ): """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features @@ -201,13 +201,14 @@ def get_current_data( """ # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login - task = process_raw_data.delay(request.url.scheme, request.client.host, params, background_tasks) + task = process_raw_data.delay(request.url.scheme, request.client.host, params) return JSONResponse({"task_id": task.id, "track_link": f"/current-snapshot/tasks/{task.id}/"}) @router.get("/current-snapshot/tasks/{task_id}/") def get_status(task_id): - task_result = AsyncResult(task_id) + task_result = AsyncResult(task_id, app=celery) + print(task_result) result = { "task_id": task_id, "task_status": task_result.status, From bf62819306cea59f8ecaae8b1f63a695c413e334 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Wed, 21 Sep 2022 21:16:41 +0545 Subject: [PATCH 005/153] added task id to export name itself so that it will be easy to track --- API/api_worker.py | 13 ++++++------- API/raw_data.py | 1 - 2 files changed, 6 insertions(+), 8 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index f48080e0..a24b812a 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -3,7 +3,6 @@ import orjson import shutil from datetime import datetime as dt -from uuid import uuid4 import zipfile from celery import Celery from src.galaxy.config import config @@ -26,8 +25,8 @@ celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] -@celery.task(name="process_raw_data") -def process_raw_data(incoming_scheme, incoming_host, params): +@celery.task(bind=True,name="process_raw_data") +def process_raw_data(self, incoming_scheme, incoming_host, params): start_time = dt.now() if ( params.output_type is None @@ -38,12 +37,12 @@ def process_raw_data(incoming_scheme, incoming_host, params): if params.file_name: # need to format string from space to _ because it is filename , may be we need to filter special character as well later on formatted_file_name = format_file_name_str(params.file_name) - # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(uuid4())}" - exportname = f"""{formatted_file_name}_{str(uuid4())}_{params.output_type}""" # disabled date for now + # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(self.request.id)}" + exportname = f"""{formatted_file_name}_{str(self.request.id)}_{params.output_type}""" # disabled date for now else: - # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(uuid4())}" - exportname = f"Raw_Export_{str(uuid4())}_{params.output_type}" + # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(self.request.id)}" + exportname = f"Raw_Export_{str(self.request.id)}_{params.output_type}" logging.info("Request %s received", exportname) diff --git a/API/raw_data.py b/API/raw_data.py index 5468ca82..852ed05e 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -208,7 +208,6 @@ def get_current_data( @router.get("/current-snapshot/tasks/{task_id}/") def get_status(task_id): task_result = AsyncResult(task_id, app=celery) - print(task_result) result = { "task_id": task_id, "task_status": task_result.status, From db168d741cd52d28a042047be51601ea8c4a9af4 
Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 09:38:57 +0545 Subject: [PATCH 006/153] updated readme --- README.md | 48 ++++++++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 46 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index c420123a..279775e6 100644 --- a/README.md +++ b/README.md @@ -13,6 +13,11 @@ sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable sudo apt-get update sudo apt-get install gdal-bin libgdal-dev ``` +Install [redis](https://redis.io/docs/getting-started/installation/) on your system + +``` +sudo apt-get install redis +``` Clone the Repo to your machine @@ -125,6 +130,16 @@ AWS_ACCESS_KEY_ID= your id AWS_SECRET_ACCESS_KEY= yourkey BUCKET_NAME= your bucket name ``` + +Celery Configuration options: + +Galaxy API uses Celery 5 and Redis for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration + +``` +[CELERY] +CELERY_BROKER_URL=redis://localhost:6379 +CELERY_RESULT_BACKEND=redis://localhost:6379 +``` ##### Setup Tasking Manager Database for TM related development You can setup [Tasking manager](https://github.com/hotosm/tasking-manager) and add those block to config.txt @@ -139,12 +154,41 @@ port= ```uvicorn API.main:app --reload``` -### 9. Navigate to Fast API Docs to get details about API Endpoint +### 9. Check Redis server + +Check redis is running on your machine + +```sudo systemctl status redis``` + +Login to redis cli + +```redis-cli``` + +Hit ```ping``` it should return pong + +If REDIS is not running check out its [documentation](https://redis.io/docs/getting-started/) + +### 10. Start Celery Worker +You should be able to start [celery](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html#running-the-celery-worker-server) worker by running following command on different shell + +```celery --app API.api_worker worker --loglevel=INFO``` + +### 11 . [OPTIONAL] Start flower for monitoring queue + +API uses flower for monitoring the Celery distributed queue. Run this command on different shell + +```celery --app API.api_worker flower --port=5555 --broker=redis://redis:6379/``` + +### 12. Navigate to Fast API Docs to get details about API Endpoint After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) URL on your browser ```http://127.0.0.1:8000/latest/docs``` +Flower dashboard should be available on following + +http://127.0.0.1:5555/ + ### Check Authetication 1. Hit /auth/login/ @@ -160,7 +204,7 @@ INSERT INTO users_roles VALUES (ID, 1); Repeat the steps to get a new access_token. -#### API has been setup successfully ! +#### API has been setup successfully ! 
## Run tests From 8708f292b82f06500f5571fb8729bfacd4b97887 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 10:10:17 +0545 Subject: [PATCH 007/153] applied versioning on rawdata snapshot --- API/raw_data.py | 357 +++++++++++++++++++++++++++++++++++++++++++++--- README.md | 6 +- 2 files changed, 340 insertions(+), 23 deletions(-) diff --git a/API/raw_data.py b/API/raw_data.py index 852ed05e..401e62b7 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -19,19 +19,30 @@ """[Router Responsible for Raw data API ] """ - +import os +from datetime import datetime as dt +from uuid import uuid4 +import time +import zipfile +import requests # from .auth import login_required +import pathlib +import shutil from starlette.background import BackgroundTasks +import orjson + from fastapi import APIRouter, Request from fastapi_versioning import version from fastapi.responses import JSONResponse - # from fastapi import APIRouter, Depends, Request -from src.galaxy.validation.models import RawDataCurrentParams -from src.galaxy.app import RawData +from src.galaxy.query_builder.builder import format_file_name_str +from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType +from src.galaxy.app import RawData, S3FileTransfer from celery.result import AsyncResult from .api_worker import process_raw_data, celery +from src.galaxy.config import use_s3_to_upload, logger as logging, config + router = APIRouter(prefix="/raw-data") # @router.post("/historical-snapshot/") @@ -43,9 +54,7 @@ @router.post("/current-snapshot/") @version(1) -def get_current_data( - params: RawDataCurrentParams, request: Request -): +def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks: BackgroundTasks, request: Request): """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features Args: @@ -200,20 +209,84 @@ def get_current_data( } """ - # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login - task = process_raw_data.delay(request.url.scheme, request.client.host, params) - return JSONResponse({"task_id": task.id, "track_link": f"/current-snapshot/tasks/{task.id}/"}) +# def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login + start_time = dt.now() + if params.output_type is None: # if no ouput type is supplied default is geojson output + params.output_type = RawDataOutputType.GEOJSON.value + # unique id for zip file and geojson for each export + if params.file_name: + # need to format string from space to _ because it is filename , may be we need to filter special character as well later on + formatted_file_name = format_file_name_str(params.file_name) + # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(uuid4())}" + exportname = f"""{formatted_file_name}_{str(uuid4())}_{params.output_type}""" # disabled date for now -@router.get("/current-snapshot/tasks/{task_id}/") -def get_status(task_id): - task_result = AsyncResult(task_id, app=celery) - result = { - "task_id": task_id, - "task_status": task_result.status, - "task_result": task_result.result - } - return JSONResponse(result) + else: + # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(uuid4())}" + exportname = f"Raw_Export_{str(uuid4())}_{params.output_type}" + + logging.info("Request %s 
received", exportname) + + dump_temp_file, geom_area, root_dir_file = RawData( + params).extract_current_data(exportname) + path = f"""{root_dir_file}{exportname}/""" + + if os.path.exists(path) is False: + return JSONResponse( + status_code=400, + content={"Error": "Request went too big"} + ) + + logging.debug('Zip Binding Started !') + # saving file in temp directory instead of memory so that zipping file will not eat memory + zip_temp_path = f"""{root_dir_file}{exportname}.zip""" + zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) + + directory = pathlib.Path(path) + for file_path in directory.iterdir(): + zf.write(file_path, arcname=file_path.name) + + # Compressing geojson file + zf.writestr("clipping_boundary.geojson", + orjson.dumps(dict(params.geometry))) + + zf.close() + logging.debug('Zip Binding Done !') + inside_file_size = 0 + for temp_file in dump_temp_file: + # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore + if os.path.exists(temp_file): + inside_file_size += os.path.getsize(temp_file) + + # remove the file that are just binded to zip file , we no longer need to store it + background_tasks.add_task(remove_file, path) + + # check if download url will be generated from s3 or not from config + if use_s3_to_upload: + file_transfer_obj = S3FileTransfer() + download_url = file_transfer_obj.upload(zip_temp_path, exportname) + # watches the status code of the link provided and deletes the file if it is 200 + background_tasks.add_task(watch_s3_upload, download_url, zip_temp_path) + else: + + # getting from config in case api and frontend is not hosted on same url + client_host = config.get( + "API_CONFIG", "api_host", fallback=f"""{request.url.scheme}://{request.client.host}""") + client_port = config.get("API_CONFIG", "api_port", fallback=8000) + + if client_port: + download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + else: + download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! 
+ + # getting file size of zip , units are in bytes converted to mb in response + zip_file_size = os.path.getsize(zip_temp_path) + response_time = dt.now() - start_time + response_time_str = str(response_time) + logging.info( + f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") + + return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000)} MB""", "zip_file_size_bytes": {zip_file_size}} @router.get("/status/") @@ -223,3 +296,249 @@ def check_current_db_status(): result = RawData().check_status() response = f"{result} ago" return {"last_updated": response} + + +def remove_file(path: str) -> None: + """Used for removing temp file dir and its all content after zip file is delivered to user + """ + try: + shutil.rmtree(path) + except OSError as ex: + logging.error("Error: %s - %s.", ex.filename, ex.strerror) + + +def watch_s3_upload(url: str, path: str) -> None: + """Watches upload of s3 either it is completed or not and removes the temp file after completion + + Args: + url (_type_): url generated by the script where data will be available + path (_type_): path where temp file is located at + """ + start_time = time.time() + remove_temp_file = True + check_call = requests.head(url).status_code + if check_call != 200: + logging.debug("Upload is not done yet waiting ...") + while check_call != 200: # check until status is not green + check_call = requests.head(url).status_code + if time.time() - start_time > 300: + logging.error( + "Upload time took more than 5 min , Killing watch : %s , URL : %s", path, url) + remove_temp_file = False # don't remove the file if upload fails + break + time.sleep(3) # check each 3 second + # once it is verfied file is uploaded finally remove the file + if remove_temp_file: + logging.debug( + "File is uploaded at %s , flushing out from %s", url, path) + os.unlink(path) + + +@router.post("/current-snapshot/") +@version(2) +def get_current_snapshot_of_osm_data( + params: RawDataCurrentParams, request: Request +): + """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features + + Steps to Run Snapshot : + + 1. Post the your request here and your request will be on queue, endpoint will return as following : + { + "task_id": "your task_id", + "track_link": "/current-snapshot/tasks/task_id/" + } + 2. 
Now navigate to /current-snapshot/tasks/ with your task id to track progress and result + + Args: + + params (RawDataCurrentParams): + { + "outputType": "GeoJSON", + "fileName": "string", + "geometry": { # only polygon is supported ** required field ** + "coordinates": [ + [ + [ + 1,0 + ], + [ + 2,0 + ] + ] + ], + "type": "Polygon" + }, + "filters" : { + "tags": { # tags filter controls no of rows returned + "point" : {"amenity":["shop"]}, + "line" : {}, + "polygon" : {"key":["value1","value2"],"key2":["value1"]}, + "all_geometry" : {"building":['yes']} # master filter applied to all of the geometries selected on geometryType + }, + "attributes": { # attribute column controls associated k-v pairs returned + "point": [], column + "line" : [], + "polygon" : [], + "all_geometry" : ["name","address"], # master field applied to all geometries selected on geometryType + } + }, + "geometryType": [ + "point","line","polygon" + ] + } + background_tasks (BackgroundTasks): task to cleanup the files produced during export + request (Request): request instance + + Returns : + { + "task_id": "7d241e47-ffd6-405c-9312-614593f77b14", + "track_link": "/current-snapshot/tasks/7d241e47-ffd6-405c-9312-614593f77b14/" + } + + Sample Query : + 1. Sample query to extract point and polygon features that are marked building=* with name attribute + { + "outputType": "GeoJSON", + "fileName": "Pokhara_buildings", + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 83.96919250488281, + 28.194446860487773 + ], + [ + 83.99751663208006, + 28.194446860487773 + ], + [ + 83.99751663208006, + 28.214869548073377 + ], + [ + 83.96919250488281, + 28.214869548073377 + ], + [ + 83.96919250488281, + 28.194446860487773 + ] + ] + ] + }, + "filters": {"tags":{"all_geometry":{"building":[]}},"attributes":{"all_geometry":["name"]}}, + "geometryType": [ + "point","polygon" + ] + } + 2. Query to extract all OpenStreetMap features in a polygon in shapefile format: + { + "outputType": "shp", + "fileName": "Pokhara_all_features", + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 83.96919250488281, + 28.194446860487773 + ], + [ + 83.99751663208006, + 28.194446860487773 + ], + [ + 83.99751663208006, + 28.214869548073377 + ], + [ + 83.96919250488281, + 28.214869548073377 + ], + [ + 83.96919250488281, + 28.194446860487773 + ] + ] + ] + } + } + 3. 
Clean query to extract all features by deafult; output will be same as 2nd query but in GeoJSON format and output name will be `default` + { + "geometry": { + "type": "Polygon", + "coordinates": [ + [ + [ + 83.96919250488281, + 28.194446860487773 + ], + [ + 83.99751663208006, + 28.194446860487773 + ], + [ + 83.99751663208006, + 28.214869548073377 + ], + [ + 83.96919250488281, + 28.214869548073377 + ], + [ + 83.96919250488281, + 28.194446860487773 + ] + ] + ] + } + } + + """ + # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login + task = process_raw_data.delay(request.url.scheme, request.client.host, params) + return JSONResponse({"task_id": task.id, "track_link": f"/current-snapshot/tasks/{task.id}/"}) + + +@router.get("/current-snapshot/tasks/{task_id}/") +@version(2) +def get_task_status(task_id): + """Tracks the request from the task id provided by galaxy api for the request + + Args: + + task_id ([type]): [Unique id provided on response from /current-snapshot/] + + Returns: + + id: Id of the task + status : SUCCESS / PENDING + result : Result of task + + Successful task will have additional nested json inside row as following : + Example response of rawdata current snapshot response : + + + { + "id": "3fded368-456f-4ef4-a1b8-c099a7f77ca4", + "status": "SUCCESS", + "result": { + "download_url": "https://s3.us-east-1.amazonaws.com/exports-stage.hotosm.org/Raw_Export_3fded368-456f-4ef4-a1b8-c099a7f77ca4_GeoJSON.zip", + "file_name": "Raw_Export_3fded368-456f-4ef4-a1b8-c099a7f77ca4_GeoJSON", + "response_time": "0:00:12.175976", + "query_area": "6 Sq Km ", + "binded_file_size": "7 MB", + "zip_file_size_bytes": 1331601 + + } + + """ + task_result = AsyncResult(task_id, app=celery) + result = { + "id": task_id, + "status": task_result.status, + "result": task_result.result + } + return JSONResponse(result) diff --git a/README.md b/README.md index 279775e6..6b956749 100644 --- a/README.md +++ b/README.md @@ -177,7 +177,7 @@ You should be able to start [celery](https://docs.celeryq.dev/en/stable/getting- API uses flower for monitoring the Celery distributed queue. Run this command on different shell -```celery --app API.api_worker flower --port=5555 --broker=redis://redis:6379/``` +```celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/``` ### 12. Navigate to Fast API Docs to get details about API Endpoint @@ -185,9 +185,7 @@ After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) ```http://127.0.0.1:8000/latest/docs``` -Flower dashboard should be available on following - -http://127.0.0.1:5555/ +Flower dashboard should be available on 5550 localhost port. 
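+
+If the dashboard does not come up, check that the broker URL passed to flower matches your setup: the command in step 11 points at the `redis` hostname, so with a local Redis install (step 9) you would typically use localhost instead, for example:
+
+```celery --app API.api_worker flower --port=5550 --broker=redis://localhost:6379/```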
### Check Authetication From 8ddfd34ac8a2a2d80fa48dc511e998db3fc0b34d Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 10:39:38 +0545 Subject: [PATCH 008/153] updated version of click --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 4a04a025..36b9526e 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,6 +1,6 @@ aiofiles==0.7.0 asgiref==3.3.4 -click==8.0.1 +click==8.0.3 fastapi==0.65.2 h11==0.12.0 importlib-metadata==4.5.0 From 59ce9bb83a899f2363277ef90d91f4a5019d1e75 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 10:46:27 +0545 Subject: [PATCH 009/153] enabled fiona --- requirements.txt | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/requirements.txt b/requirements.txt index 36b9526e..9d0a5f1b 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,7 @@ sphinx==4.2.0 area==1.1.1 orjson==3.6.7 boto3==1.24.38 -# Fiona==1.8.21 +Fiona==1.8.21 fastapi-versioning==0.10.0 redis==4.3.4 celery==5.2.7 From 39594231dabc149e92f6e3027ed35ab473304902 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 13:23:21 +0545 Subject: [PATCH 010/153] added docker workflow --- Docker/Dockerfile => Dockerfile | 5 ++-- docker-compose.yml | 34 +++++++++++++++++++++++++++ Docker/entrypoint.sh => entrypoint.sh | 3 +-- requirements.docker.txt | 9 ++++--- src/config.txt.sample | 16 ++++++++----- 5 files changed, 54 insertions(+), 13 deletions(-) rename Docker/Dockerfile => Dockerfile (83%) create mode 100644 docker-compose.yml rename Docker/entrypoint.sh => entrypoint.sh (93%) diff --git a/Docker/Dockerfile b/Dockerfile similarity index 83% rename from Docker/Dockerfile rename to Dockerfile index c46cb72b..b03a89a9 100644 --- a/Docker/Dockerfile +++ b/Dockerfile @@ -14,9 +14,10 @@ RUN pip install --upgrade pip RUN pip install -r requirements.docker.txt RUN pip install -e . -RUN chmod +x Docker/entrypoint.sh +COPY /entrypoint.sh /entrypoint.sh +RUN chmod +x /entrypoint.sh #CMD ["uvicorn", "API.main:app", "--reload", "--host", "0.0.0.0", "--port", "8000"] -ENTRYPOINT ["Docker/entrypoint.sh"] +ENTRYPOINT ["/entrypoint.sh"] HEALTHCHECK CMD curl -f http://localhost:8000 || exit 1 diff --git a/docker-compose.yml b/docker-compose.yml new file mode 100644 index 00000000..45b76ded --- /dev/null +++ b/docker-compose.yml @@ -0,0 +1,34 @@ +version: '3.8' + +services: + + app: + build: . + ports: + - 8000:8000 + volumes: + - .:/app + depends_on: + - redis + + worker: + build: . + command: celery --app API.api_worker worker --loglevel=INFO + volumes: + - .:/app + depends_on: + - app + - redis + + redis: + image: redis:6-alpine + + worker-dashboard: + build: . + command: celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ + ports: + - 5550:5550 + depends_on: + - app + - redis + - worker \ No newline at end of file diff --git a/Docker/entrypoint.sh b/entrypoint.sh similarity index 93% rename from Docker/entrypoint.sh rename to entrypoint.sh index da1b9d3e..9d0b563d 100644 --- a/Docker/entrypoint.sh +++ b/entrypoint.sh @@ -1,5 +1,4 @@ -#!/usr/bin/env bash - +#!/bin/sh if [[ -z "${GALAXY_API_CONFIG_FILE}" ]]; then printf "Error: GALAXY_API_CONFIG_FILE environment variable missing.. Exiting!" 
exit 1 diff --git a/requirements.docker.txt b/requirements.docker.txt index 8ad35583..9d0a5f1b 100644 --- a/requirements.docker.txt +++ b/requirements.docker.txt @@ -1,6 +1,6 @@ aiofiles==0.7.0 asgiref==3.3.4 -click==8.0.1 +click==8.0.3 fastapi==0.65.2 h11==0.12.0 importlib-metadata==4.5.0 @@ -28,5 +28,8 @@ orjson==3.6.7 boto3==1.24.38 Fiona==1.8.21 fastapi-versioning==0.10.0 -#gdal and ogr2ogr is required on the machine to run rawdata endpoint -#gdal == 3.3.2 +redis==4.3.4 +celery==5.2.7 +flower==1.2.0 +#gdal and ogr2ogr is required on the machine to run rawdata endpoint +# gdal == 3.3.2 diff --git a/src/config.txt.sample b/src/config.txt.sample index 98174af1..a178dcc7 100644 --- a/src/config.txt.sample +++ b/src/config.txt.sample @@ -43,20 +43,24 @@ env=dev #export_path=exports/ # used to store export path #api_host=http://127.0.0.1 #api_port=8000 -#max_area=100000 # max area to support for rawdata input -#use_connection_pooling=True +#max_area=100000 # max area to support for rawdata input +#use_connection_pooling=True #log_level=info #options are info,debug,warning,error #env=dev # default is prod , supported values are dev and prod #shp_limit=6000 # in mb default is 4096 #[EXPORT_UPLOAD] -#FILE_UPLOAD_METHOD=disk # options are s3,disk -#AWS_ACCESS_KEY_ID= your id -#AWS_SECRET_ACCESS_KEY= yourkey -#BUCKET_NAME= your bucket name +#FILE_UPLOAD_METHOD=disk # options are s3,disk +#AWS_ACCESS_KEY_ID= your id +#AWS_SECRET_ACCESS_KEY= yourkey +#BUCKET_NAME= your bucket name #[TM] #host= #user= #password= #port= + +#[CELERY] +#CELERY_BROKER_URL=redis://localhost:6379 +#CELERY_RESULT_BACKEND=redis://localhost:6379 From 2f181c0d1616eded45813ec931d8292f82d5fced Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Thu, 22 Sep 2022 08:28:53 +0000 Subject: [PATCH 011/153] ignored all .out files --- .gitignore | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index ad2d58ab..2fd08af4 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,4 @@ build newrelic.ini newrelic.ini_backup exports -nohup.out \ No newline at end of file +*.out \ No newline at end of file From 78b5d7e7e5d4991afc61e676b14f1a2da96989a2 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 15:34:42 +0545 Subject: [PATCH 012/153] remove fiona since its no longer used --- requirements.docker.txt | 1 - requirements.txt | 1 - src/galaxy/app.py | 152 ++++++++++++++++++++-------------------- src/galaxy/config.py | 2 +- 4 files changed, 77 insertions(+), 79 deletions(-) diff --git a/requirements.docker.txt b/requirements.docker.txt index 9d0a5f1b..1bf94ede 100644 --- a/requirements.docker.txt +++ b/requirements.docker.txt @@ -26,7 +26,6 @@ sphinx==4.2.0 area==1.1.1 orjson==3.6.7 boto3==1.24.38 -Fiona==1.8.21 fastapi-versioning==0.10.0 redis==4.3.4 celery==5.2.7 diff --git a/requirements.txt b/requirements.txt index 9d0a5f1b..1bf94ede 100644 --- a/requirements.txt +++ b/requirements.txt @@ -26,7 +26,6 @@ sphinx==4.2.0 area==1.1.1 orjson==3.6.7 boto3==1.24.38 -Fiona==1.8.21 fastapi-versioning==0.10.0 redis==4.3.4 celery==5.2.7 diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 760ae580..fb09751e 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -37,8 +37,8 @@ from area import area import subprocess from json import dumps -import fiona -from fiona.crs import from_epsg +# import fiona +# from fiona.crs import from_epsg import time import shutil import boto3 @@ -1029,80 +1029,80 @@ def query2geojson(con, extraction_query, dump_temp_file_path): f.write(post_geojson) 
logging.debug("Server side Query Result Post Processing Done") - @staticmethod - def query2shapefile(con, point_query, line_query, poly_query, point_schema, line_schema, poly_schema, dump_temp_file_path): - """Function that transfer db query to shp""" - # schema: it is a simple dictionary with geometry and properties as keys - # schema = {'geometry': 'LineString','properties': {'test': 'int'}} - file_paths = [] - if point_query: - logging.debug("Writing Point Shapefile") - - schema = {'geometry': 'Point', 'properties': point_schema, } - point_file_path = f"""{dump_temp_file_path}_point.shp""" - # open a fiona object - pointShp = fiona.open(point_file_path, mode='w', driver='ESRI Shapefile', encoding='UTF-8', - schema=schema, crs="EPSG:4326") - - with con.cursor(name='fetch_raw') as cursor: # using server side cursor - cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side - cursor.execute(point_query) - for row in cursor: - pointShp.write(orjson.loads(row[0])) - - cursor.close() # closing connection to avoid memory issues - # close fiona object - pointShp.close() - file_paths.append(point_file_path) - file_paths.append(f"""{dump_temp_file_path}_point.shx""") - file_paths.append(f"""{dump_temp_file_path}_point.cpg""") - file_paths.append(f"""{dump_temp_file_path}_point.dbf""") - file_paths.append(f"""{dump_temp_file_path}_point.prj""") - - if line_query: - logging.debug("Writing Line Shapefile") - - schema = {'geometry': 'LineString', 'properties': line_schema, } - # print(schema) - line_file_path = f"""{dump_temp_file_path}_line.shp""" - with fiona.open(line_file_path, 'w', encoding='UTF-8', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: - with con.cursor(name='fetch_raw') as cursor: # using server side cursor - cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side - cursor.execute(line_query) - for row in cursor: - layer.write(orjson.loads(row[0])) - - cursor.close() # closing connection to avoid memory issues - # close fiona object - layer.close() - file_paths.append(line_file_path) - file_paths.append(f"""{dump_temp_file_path}_line.shx""") - file_paths.append(f"""{dump_temp_file_path}_line.cpg""") - file_paths.append(f"""{dump_temp_file_path}_line.dbf""") - file_paths.append(f"""{dump_temp_file_path}_line.prj""") - - if poly_query: - logging.debug("Writing Poly Shapefile") - - poly_file_path = f"""{dump_temp_file_path}_poly.shp""" - schema = {'geometry': 'Polygon', 'properties': poly_schema, } - - with fiona.open(poly_file_path, 'w', encoding='UTF-8', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: - with con.cursor(name='fetch_raw') as cursor: # using server side cursor - cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side - cursor.execute(poly_query) - for row in cursor: - layer.write(orjson.loads(row[0])) - - cursor.close() # closing connection to avoid memory issues - # close fiona object - layer.close() - file_paths.append(poly_file_path) - file_paths.append(f"""{dump_temp_file_path}_poly.shx""") - file_paths.append(f"""{dump_temp_file_path}_poly.cpg""") - file_paths.append(f"""{dump_temp_file_path}_poly.dbf""") - file_paths.append(f"""{dump_temp_file_path}_poly.prj""") - return file_paths + # @staticmethod + # def query2shapefile(con, point_query, line_query, poly_query, point_schema, line_schema, poly_schema, dump_temp_file_path): + # """Function that transfer db query to shp""" + # # schema: it is a simple dictionary with geometry and properties as keys + # # 
schema = {'geometry': 'LineString','properties': {'test': 'int'}} + # file_paths = [] + # if point_query: + # logging.debug("Writing Point Shapefile") + + # schema = {'geometry': 'Point', 'properties': point_schema, } + # point_file_path = f"""{dump_temp_file_path}_point.shp""" + # # open a fiona object + # pointShp = fiona.open(point_file_path, mode='w', driver='ESRI Shapefile', encoding='UTF-8', + # schema=schema, crs="EPSG:4326") + + # with con.cursor(name='fetch_raw') as cursor: # using server side cursor + # cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side + # cursor.execute(point_query) + # for row in cursor: + # pointShp.write(orjson.loads(row[0])) + + # cursor.close() # closing connection to avoid memory issues + # # close fiona object + # pointShp.close() + # file_paths.append(point_file_path) + # file_paths.append(f"""{dump_temp_file_path}_point.shx""") + # file_paths.append(f"""{dump_temp_file_path}_point.cpg""") + # file_paths.append(f"""{dump_temp_file_path}_point.dbf""") + # file_paths.append(f"""{dump_temp_file_path}_point.prj""") + + # if line_query: + # logging.debug("Writing Line Shapefile") + + # schema = {'geometry': 'LineString', 'properties': line_schema, } + # # print(schema) + # line_file_path = f"""{dump_temp_file_path}_line.shp""" + # with fiona.open(line_file_path, 'w', encoding='UTF-8', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: + # with con.cursor(name='fetch_raw') as cursor: # using server side cursor + # cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side + # cursor.execute(line_query) + # for row in cursor: + # layer.write(orjson.loads(row[0])) + + # cursor.close() # closing connection to avoid memory issues + # # close fiona object + # layer.close() + # file_paths.append(line_file_path) + # file_paths.append(f"""{dump_temp_file_path}_line.shx""") + # file_paths.append(f"""{dump_temp_file_path}_line.cpg""") + # file_paths.append(f"""{dump_temp_file_path}_line.dbf""") + # file_paths.append(f"""{dump_temp_file_path}_line.prj""") + + # if poly_query: + # logging.debug("Writing Poly Shapefile") + + # poly_file_path = f"""{dump_temp_file_path}_poly.shp""" + # schema = {'geometry': 'Polygon', 'properties': poly_schema, } + + # with fiona.open(poly_file_path, 'w', encoding='UTF-8', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: + # with con.cursor(name='fetch_raw') as cursor: # using server side cursor + # cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side + # cursor.execute(poly_query) + # for row in cursor: + # layer.write(orjson.loads(row[0])) + + # cursor.close() # closing connection to avoid memory issues + # # close fiona object + # layer.close() + # file_paths.append(poly_file_path) + # file_paths.append(f"""{dump_temp_file_path}_poly.shx""") + # file_paths.append(f"""{dump_temp_file_path}_poly.cpg""") + # file_paths.append(f"""{dump_temp_file_path}_poly.dbf""") + # file_paths.append(f"""{dump_temp_file_path}_poly.prj""") + # return file_paths @staticmethod def get_grid_id(geom, cur): diff --git a/src/galaxy/config.py b/src/galaxy/config.py index d9cda9bb..356e1f27 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -44,7 +44,7 @@ "logging config is not supported , Supported fields are : debug,error,warning,info , Logging to default :debug") level = logging.DEBUG -logging.getLogger("fiona").propagate = False # disable fiona logging +# logging.getLogger("fiona").propagate = False # disable fiona logging 
logging.basicConfig(format='%(asctime)s - %(message)s', level=level) logging.getLogger('boto3').propagate = False # disable boto3 logging From f3041e9eb34aadad646f2e3b8c1b9c3bf0f8e642 Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Thu, 22 Sep 2022 10:01:07 +0000 Subject: [PATCH 013/153] added try except block --- API/api_worker.py | 172 ++++++++++++++++++++++++---------------------- API/raw_data.py | 2 + 2 files changed, 90 insertions(+), 84 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index a24b812a..9a37106e 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -27,92 +27,96 @@ @celery.task(bind=True,name="process_raw_data") def process_raw_data(self, incoming_scheme, incoming_host, params): - start_time = dt.now() - if ( - params.output_type is None - ): # if no ouput type is supplied default is geojson output - params.output_type = RawDataOutputType.GEOJSON.value - - # unique id for zip file and geojson for each export - if params.file_name: - # need to format string from space to _ because it is filename , may be we need to filter special character as well later on - formatted_file_name = format_file_name_str(params.file_name) - # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(self.request.id)}" - exportname = f"""{formatted_file_name}_{str(self.request.id)}_{params.output_type}""" # disabled date for now - - else: - # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(self.request.id)}" - exportname = f"Raw_Export_{str(self.request.id)}_{params.output_type}" - - logging.info("Request %s received", exportname) - - dump_temp_file, geom_area, root_dir_file = RawData(params).extract_current_data( - exportname - ) - path = f"""{root_dir_file}{exportname}/""" - - if os.path.exists(path) is False: - return JSONResponse(status_code=400, content={"Error": "Request went too big"}) - - logging.debug("Zip Binding Started !") - # saving file in temp directory instead of memory so that zipping file will not eat memory - zip_temp_path = f"""{root_dir_file}{exportname}.zip""" - zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) - - directory = pathlib.Path(path) - for file_path in directory.iterdir(): - zf.write(file_path, arcname=file_path.name) - - # Compressing geojson file - zf.writestr("clipping_boundary.geojson", - orjson.dumps(dict(params.geometry))) - - zf.close() - logging.debug("Zip Binding Done !") - inside_file_size = 0 - for temp_file in dump_temp_file: - # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore - if os.path.exists(temp_file): - inside_file_size += os.path.getsize(temp_file) - - # remove the file that are just binded to zip file , we no longer need to store it - remove_file(path) - - # check if download url will be generated from s3 or not from config - if use_s3_to_upload: - file_transfer_obj = S3FileTransfer() - download_url = file_transfer_obj.upload(zip_temp_path, exportname) - else: - - # getting from config in case api and frontend is not hosted on same url - client_host = config.get( - "API_CONFIG", - "api_host", - fallback=f"""{incoming_scheme}://{incoming_host}""", - ) - client_port = config.get("API_CONFIG", "api_port", fallback=8000) + try: + start_time = dt.now() + if ( + params.output_type is None + ): # if no ouput type is supplied default is geojson output + params.output_type = RawDataOutputType.GEOJSON.value + + # unique id for zip file and geojson for each export + if params.file_name: + # need to format string from space to _ because it is 
filename , may be we need to filter special character as well later on + formatted_file_name = format_file_name_str(params.file_name) + # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(self.request.id)}" + exportname = f"""{formatted_file_name}_{str(self.request.id)}_{params.output_type}""" # disabled date for now + + else: + # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(self.request.id)}" + exportname = f"Raw_Export_{str(self.request.id)}_{params.output_type}" + + logging.info("Request %s received", exportname) - if client_port: - download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + dump_temp_file, geom_area, root_dir_file = RawData(params).extract_current_data( + exportname + ) + path = f"""{root_dir_file}{exportname}/""" + + if os.path.exists(path) is False: + return JSONResponse(status_code=400, content={"Error": "Request went too big"}) + + logging.debug("Zip Binding Started !") + # saving file in temp directory instead of memory so that zipping file will not eat memory + zip_temp_path = f"""{root_dir_file}{exportname}.zip""" + zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) + + directory = pathlib.Path(path) + for file_path in directory.iterdir(): + zf.write(file_path, arcname=file_path.name) + + # Compressing geojson file + zf.writestr("clipping_boundary.geojson", + orjson.dumps(dict(params.geometry))) + + zf.close() + logging.debug("Zip Binding Done !") + inside_file_size = 0 + for temp_file in dump_temp_file: + # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore + if os.path.exists(temp_file): + inside_file_size += os.path.getsize(temp_file) + + # remove the file that are just binded to zip file , we no longer need to store it + remove_file(path) + + # check if download url will be generated from s3 or not from config + if use_s3_to_upload: + file_transfer_obj = S3FileTransfer() + download_url = file_transfer_obj.upload(zip_temp_path, exportname) else: - download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! 
- - # getting file size of zip , units are in bytes converted to mb in response - zip_file_size = os.path.getsize(zip_temp_path) - response_time = dt.now() - start_time - response_time_str = str(response_time) - logging.info( - f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}" - ) - - return { - "download_url": download_url, - "file_name": exportname, - "response_time": response_time_str, - "query_area": f"{geom_area} Sq Km ", - "binded_file_size": f"{round(inside_file_size/1000000)} MB", - "zip_file_size_bytes": zip_file_size, - } + + # getting from config in case api and frontend is not hosted on same url + client_host = config.get( + "API_CONFIG", + "api_host", + fallback=f"""{incoming_scheme}://{incoming_host}""", + ) + client_port = config.get("API_CONFIG", "api_port", fallback=8000) + + if client_port: + download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + else: + download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + + # getting file size of zip , units are in bytes converted to mb in response + zip_file_size = os.path.getsize(zip_temp_path) + response_time = dt.now() - start_time + response_time_str = str(response_time) + logging.info( + f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}" + ) + + return { + "download_url": download_url, + "file_name": exportname, + "response_time": response_time_str, + "query_area": f"{geom_area} Sq Km ", + "binded_file_size": f"{round(inside_file_size/1000000)} MB", + "zip_file_size_bytes": zip_file_size, + } + except Exception as ex: + self.update_state(state='FAILURE', meta={'exc': ex}) + def remove_file(path: str) -> None: diff --git a/API/raw_data.py b/API/raw_data.py index 401e62b7..7d4b46d5 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -539,6 +539,8 @@ def get_task_status(task_id): result = { "id": task_id, "status": task_result.status, + "status": task_result.state, + "result": task_result.result } return JSONResponse(result) From ec6654f3c005a641cd8ceb8ddd9b61ec4bca5a4b Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 16:33:02 +0545 Subject: [PATCH 014/153] added config file validation and restored previous docker file --- Docker/Dockerfile | 22 ++++++++++++++++++++++ entrypoint.sh => Docker/entrypoint.sh | 3 ++- Dockerfile | 6 +----- docker-compose.yml | 1 + src/galaxy/config.py | 4 ++++ 5 files changed, 30 insertions(+), 6 deletions(-) create mode 100644 Docker/Dockerfile rename entrypoint.sh => Docker/entrypoint.sh (93%) diff --git a/Docker/Dockerfile b/Docker/Dockerfile new file mode 100644 index 00000000..c46cb72b --- /dev/null +++ b/Docker/Dockerfile @@ -0,0 +1,22 @@ +FROM python:3.9-bullseye + +ENV PIP_NO_CACHE_DIR=1 +RUN apt-get update && apt-get -y upgrade && \ + apt-get -y install gdal-bin python3-gdal && \ + apt-get -y autoremove && \ + apt-get clean + +COPY . 
/app + +WORKDIR /app + +RUN pip install --upgrade pip +RUN pip install -r requirements.docker.txt +RUN pip install -e . + +RUN chmod +x Docker/entrypoint.sh + +#CMD ["uvicorn", "API.main:app", "--reload", "--host", "0.0.0.0", "--port", "8000"] +ENTRYPOINT ["Docker/entrypoint.sh"] + +HEALTHCHECK CMD curl -f http://localhost:8000 || exit 1 diff --git a/entrypoint.sh b/Docker/entrypoint.sh similarity index 93% rename from entrypoint.sh rename to Docker/entrypoint.sh index 9d0b563d..da1b9d3e 100644 --- a/entrypoint.sh +++ b/Docker/entrypoint.sh @@ -1,4 +1,5 @@ -#!/bin/sh +#!/usr/bin/env bash + if [[ -z "${GALAXY_API_CONFIG_FILE}" ]]; then printf "Error: GALAXY_API_CONFIG_FILE environment variable missing.. Exiting!" exit 1 diff --git a/Dockerfile b/Dockerfile index b03a89a9..73cdbc08 100644 --- a/Dockerfile +++ b/Dockerfile @@ -14,10 +14,6 @@ RUN pip install --upgrade pip RUN pip install -r requirements.docker.txt RUN pip install -e . -COPY /entrypoint.sh /entrypoint.sh -RUN chmod +x /entrypoint.sh - -#CMD ["uvicorn", "API.main:app", "--reload", "--host", "0.0.0.0", "--port", "8000"] -ENTRYPOINT ["/entrypoint.sh"] +COPY /src/config.txt src/config.txt HEALTHCHECK CMD curl -f http://localhost:8000 || exit 1 diff --git a/docker-compose.yml b/docker-compose.yml index 45b76ded..c8a6a2ab 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,6 +4,7 @@ services: app: build: . + command: uvicorn API.main:app --reload --host 0.0.0.0 --port 8000 --no-use-colors --proxy-headers ports: - 8000:8000 volumes: diff --git a/src/galaxy/config.py b/src/galaxy/config.py index 356e1f27..db80d7cd 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -21,9 +21,13 @@ from configparser import ConfigParser import logging +import os CONFIG_FILE_PATH = "src/config.txt" +if os.path.exists(CONFIG_FILE_PATH) is False: + raise "Config file does not exist : src/config.txt" + config = ConfigParser() config.read(CONFIG_FILE_PATH) From b795f5412227cfc8553e0a04a0e5ce9f2a4dffa9 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 16:41:19 +0545 Subject: [PATCH 015/153] removed systemctl part --- README.md | 2 -- 1 file changed, 2 deletions(-) diff --git a/README.md b/README.md index 6b956749..de4b89b1 100644 --- a/README.md +++ b/README.md @@ -158,8 +158,6 @@ port= Check redis is running on your machine -```sudo systemctl status redis``` - Login to redis cli ```redis-cli``` From fb6ba2571a34d38b60b107511f950347e59e110a Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 17:36:41 +0545 Subject: [PATCH 016/153] updated docs --- README.md | 128 +++------------------------- docs/CONFIG_DOC.md | 116 ++++++++++++++++++++++++- docs/GETTING_STARTED_WITH_DOCKER.md | 13 +++ 3 files changed, 138 insertions(+), 119 deletions(-) create mode 100644 docs/GETTING_STARTED_WITH_DOCKER.md diff --git a/README.md b/README.md index de4b89b1..f0c325df 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,9 @@ ## Getting Started +API Can be installed through docker or manually to local machine . +To get started with docker follow [docs/GETTING_STARTED_WITH_DOCKER](https://github.com/hotosm/galaxy-api/blob/develop/docs/GETTING_STARTED_WITH_DOCKER.md) + ### 1. Install requirements. Install gdal on your machine , for example on Ubuntu @@ -35,126 +38,15 @@ Install gdal python ( Include your gdal version , if you are using different ver ```pip install gdal==3.0.2``` +### 2. 
Setup required config for API +Setup necessary config for API from [docs/CONFIG.DOC](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) -### 2. Create ```config.txt``` inside src directory. -![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) - - -### 3. Setup Underpass - Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from ```/tests/src/fixtures/underpass.sql ``` - -```psql -U postgres -h localhost underpass < underpass.sql``` -### 4. Setup Insights -Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from ```/tests/src/fixtures/insights.sql ``` - -```psql -U postgres -h localhost insights < insights.sql``` - -### 5. Setup Raw Data -Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from ```/tests/src/fixtures/raw_data.sql ``` - -```psql -U postgres -h localhost raw < raw_data.sql``` - - -### 6. Setup Oauth -Login to [OSM](https://www.openstreetmap.org/) , Click on My Settings and register your local galaxy app to Oauth2applications - -![image](https://user-images.githubusercontent.com/36752999/188452619-aababf28-b685-4141-b381-9c25d0367b57.png) - - -Check on read user preferences and Enter redirect URI as following -```http://127.0.0.1:8000/latest/auth/callback/``` - -Grab Client ID and Client Secret and put it inside config.txt as OAUTH Block , you can generate secret key for your application by yourself - - -### 7. Put your credentials inside config.txt -Insert your config blocks with the database credentials where you have underpass ,insight and tm in your database - -``` -[INSIGHTS] -host=localhost -user=postgres -password=admin -database=insights -port=5432 - -[UNDERPASS] -host=localhost -user=postgres -password=admin -database=underpass -port=5432 - -[RAW_DATA] -host=localhost -user=postgres -password=admin -database=raw -port=5432 - -[OAUTH] -client_id= your client id -client_secret= your client secret -url=https://www.openstreetmap.org -scope=read_prefs -login_redirect_uri=http://127.0.0.1:8000/latest/auth/callback/ -secret_key=jnfdsjkfndsjkfnsdkjfnskfn - -[API_CONFIG] -env=dev - -``` - -#### Optional Configuration - -You can further customize API if you wish with API_CONFIG Block - -``` -[API_CONFIG] -export_path=exports/ # used to store export path -api_host=http://127.0.0.1 # you can define this if you have different host -api_port=8000 -max_area=100000 # max area to support for rawdata input -use_connection_pooling=True # default it will not use connection pooling but you can configure api to use to for psycopg2 connections -log_level=info #options are info,debug,warning,error -env=dev # default is dev , supported values are dev and prod -shp_limit=6000 # in mb default is 4096 -``` -Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block - -``` -[EXPORT_UPLOAD] -FILE_UPLOAD_METHOD=disk # options are s3,disk , default disk -AWS_ACCESS_KEY_ID= your id -AWS_SECRET_ACCESS_KEY= yourkey -BUCKET_NAME= your bucket name -``` - -Celery Configuration options: - -Galaxy API uses Celery 5 and Redis for task queue management , Currently implemented for Rawdata endpoint. 
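The same check the README performs with `redis-cli` and `ping` can be scripted against the broker URL from the `[CELERY]` block. A minimal sketch, assuming the redis-py client is installed and the config file sits at `src/config.txt`; this helper is illustrative and not part of the repository:

```python
# Illustrative pre-flight check: confirm the Celery broker from [CELERY] answers
# before starting the worker. Assumes the redis-py package is installed.
from configparser import ConfigParser
import redis

config = ConfigParser()
config.read("src/config.txt")
broker_url = config.get("CELERY", "CELERY_BROKER_URL", fallback="redis://localhost:6379")

print(redis.Redis.from_url(broker_url).ping())  # True is the scripted equivalent of PING -> PONG
```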
6379 is the default port , You can change the port according to your configuration - -``` -[CELERY] -CELERY_BROKER_URL=redis://localhost:6379 -CELERY_RESULT_BACKEND=redis://localhost:6379 -``` -##### Setup Tasking Manager Database for TM related development - -You can setup [Tasking manager](https://github.com/hotosm/tasking-manager) and add those block to config.txt -``` -[TM] -host= -user= -password= -port= -``` -### 8. Run server +### 3. Run server ```uvicorn API.main:app --reload``` -### 9. Check Redis server +### 4. Check Redis server Check redis is running on your machine @@ -166,18 +58,18 @@ Hit ```ping``` it should return pong If REDIS is not running check out its [documentation](https://redis.io/docs/getting-started/) -### 10. Start Celery Worker +### 5. Start Celery Worker You should be able to start [celery](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html#running-the-celery-worker-server) worker by running following command on different shell ```celery --app API.api_worker worker --loglevel=INFO``` -### 11 . [OPTIONAL] Start flower for monitoring queue +### 6 . [OPTIONAL] Start flower for monitoring queue API uses flower for monitoring the Celery distributed queue. Run this command on different shell ```celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/``` -### 12. Navigate to Fast API Docs to get details about API Endpoint +### 7. Navigate to Fast API Docs to get details about API Endpoint After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) URL on your browser diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index aa41c0a5..e470687b 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1 +1,115 @@ -This document describes what are the config values that API can accept , and what do they mean + + +### 1. Create ```config.txt``` inside src directory. +![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) + + +### 2. Setup Underpass + Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from ```/tests/src/fixtures/underpass.sql ``` + +```psql -U postgres -h localhost underpass < underpass.sql``` +### 3. Setup Insights +Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from ```/tests/src/fixtures/insights.sql ``` + +```psql -U postgres -h localhost insights < insights.sql``` + +### 4. Setup Raw Data +Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from ```/tests/src/fixtures/raw_data.sql ``` + +```psql -U postgres -h localhost raw < raw_data.sql``` + + +### 5. Setup Oauth +Login to [OSM](https://www.openstreetmap.org/) , Click on My Settings and register your local galaxy app to Oauth2applications + +![image](https://user-images.githubusercontent.com/36752999/188452619-aababf28-b685-4141-b381-9c25d0367b57.png) + + +Check on read user preferences and Enter redirect URI as following +```http://127.0.0.1:8000/latest/auth/callback/``` + +Grab Client ID and Client Secret and put it inside config.txt as OAUTH Block , you can generate secret key for your application by yourself + + +### 6. 
Put your credentials inside config.txt +Insert your config blocks with the database credentials where you have underpass ,insight and tm in your database + +``` +[INSIGHTS] +host=localhost +user=postgres +password=admin +database=insights +port=5432 + +[UNDERPASS] +host=localhost +user=postgres +password=admin +database=underpass +port=5432 + +[RAW_DATA] +host=localhost +user=postgres +password=admin +database=raw +port=5432 + +[OAUTH] +client_id= your client id +client_secret= your client secret +url=https://www.openstreetmap.org +scope=read_prefs +login_redirect_uri=http://127.0.0.1:8000/latest/auth/callback/ +secret_key=jnfdsjkfndsjkfnsdkjfnskfn + +[API_CONFIG] +env=dev + +``` + +#### Optional Configuration + +You can further customize API if you wish with API_CONFIG Block + +``` +[API_CONFIG] +export_path=exports/ # used to store export path +api_host=http://127.0.0.1 # you can define this if you have different host +api_port=8000 +max_area=100000 # max area to support for rawdata input +use_connection_pooling=True # default it will not use connection pooling but you can configure api to use to for psycopg2 connections +log_level=info #options are info,debug,warning,error +env=dev # default is dev , supported values are dev and prod +shp_limit=6000 # in mb default is 4096 +``` +Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block + +``` +[EXPORT_UPLOAD] +FILE_UPLOAD_METHOD=disk # options are s3,disk , default disk +AWS_ACCESS_KEY_ID= your id +AWS_SECRET_ACCESS_KEY= yourkey +BUCKET_NAME= your bucket name +``` + +Celery Configuration options: + +Galaxy API uses Celery 5 and Redis for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration + +``` +[CELERY] +CELERY_BROKER_URL=redis://localhost:6379 +CELERY_RESULT_BACKEND=redis://localhost:6379 +``` +##### Setup Tasking Manager Database for TM related development + +You can setup [Tasking manager](https://github.com/hotosm/tasking-manager) and add those block to config.txt +``` +[TM] +host= +user= +password= +port= +``` \ No newline at end of file diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md new file mode 100644 index 00000000..d8d23c21 --- /dev/null +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -0,0 +1,13 @@ +### 1. First Checkout the repository and Setup Config +Follow [instructions](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) and create config.txt inside /src/ + +### 2. Create the images and spin up the Docker containers: +```docker-compose up -d --build``` + +### 3. 
Check Servers + +Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 5550 + +```http://127.0.0.1:8000/latest/docs``` + +Now follow Readme.md \ No newline at end of file From 3af19ce0fac5c4dc74626dc5850eb306487ad37b Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 22 Sep 2022 17:49:51 +0545 Subject: [PATCH 017/153] updated config doc --- docs/CONFIG_DOC.md | 22 ++++++++++++++++------ 1 file changed, 16 insertions(+), 6 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index e470687b..3e06f39c 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -105,11 +105,21 @@ CELERY_RESULT_BACKEND=redis://localhost:6379 ``` ##### Setup Tasking Manager Database for TM related development -You can setup [Tasking manager](https://github.com/hotosm/tasking-manager) and add those block to config.txt +Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager/blob/develop/docs/developers/development-setup.md#backend) OR Create database "tm" in your local postgres and insert sample dump from [TM test dump](https://github.com/hotosm/tasking-manager/blob/develop/tests/database/tasking-manager.sql). +(```wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql```) + +```psql -U postgres -h localhost tm < tasking-manager.sql``` + +Add those block to config.txt with the value you use in the tasking manager configuration. ``` [TM] -host= -user= -password= -port= -``` \ No newline at end of file +host=localhost +user=postgres +password=admin +database=tm +port=5432 +``` + +You can test it later after running server with the `/mapathon/detail/` endpoint and with the following input: +`{"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]}` + From f9dab03ccee7c09944fc983d7bdc5ddb4db12aee Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:07:23 +0545 Subject: [PATCH 018/153] Update GETTING_STARTED_WITH_DOCKER.md --- docs/GETTING_STARTED_WITH_DOCKER.md | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index d8d23c21..ecd67975 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -1,4 +1,7 @@ ### 1. First Checkout the repository and Setup Config + +```git clone https://github.com/hotosm/galaxy-api.git``` + Follow [instructions](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) and create config.txt inside /src/ ### 2. Create the images and spin up the Docker containers: @@ -10,4 +13,4 @@ Uvicorn should be running on 8000 port , Redis on default port , Celery with a w ```http://127.0.0.1:8000/latest/docs``` -Now follow Readme.md \ No newline at end of file +Now follow Readme.md From 3d02bb972525c3f4b956b5724f69f80cffe15b64 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:09:37 +0545 Subject: [PATCH 019/153] Update GETTING_STARTED_WITH_DOCKER.md --- docs/GETTING_STARTED_WITH_DOCKER.md | 15 ++++++++++++--- 1 file changed, 12 insertions(+), 3 deletions(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index ecd67975..8a83b5fc 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -1,16 +1,25 @@ ### 1. 
First Checkout the repository and Setup Config -```git clone https://github.com/hotosm/galaxy-api.git``` +``` +git clone https://github.com/hotosm/galaxy-api.git +``` Follow [instructions](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) and create config.txt inside /src/ ### 2. Create the images and spin up the Docker containers: -```docker-compose up -d --build``` +``` +docker-compose up -d --build +``` ### 3. Check Servers Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 5550 -```http://127.0.0.1:8000/latest/docs``` +``` +http://127.0.0.1:8000/latest/docs +``` +``` +http://127.0.0.1:8000/5550/ +``` Now follow Readme.md From c057081652f0fae8be64cc0baa896feb0e5634e5 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:13:00 +0545 Subject: [PATCH 020/153] Update README.md --- README.md | 59 ++++++++++++++++++++++++++++++++++++++++++------------- 1 file changed, 45 insertions(+), 14 deletions(-) diff --git a/README.md b/README.md index 966873e7..6b2958e6 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ## Getting Started API Can be installed through docker or manually to local machine . -To get started with docker follow [docs/GETTING_STARTED_WITH_DOCKER](https://github.com/hotosm/galaxy-api/blob/develop/docs/GETTING_STARTED_WITH_DOCKER.md) +To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.com/hotosm/galaxy-api/blob/develop/docs/GETTING_STARTED_WITH_DOCKER.md) ### 1. Install requirements. @@ -24,19 +24,27 @@ sudo apt-get install redis Clone the Repo to your machine -``` git clone https://github.com/hotosm/galaxy-api.git ``` +``` +git clone https://github.com/hotosm/galaxy-api.git +``` Navigate to repo -``` cd galaxy-api ``` +``` +cd galaxy-api +``` Install python dependencies -```pip install -r requirements.txt``` +``` +pip install -r requirements.txt +``` Install gdal python ( Include your gdal version , if you are using different version ) -```pip install gdal==3.0.2``` +``` +pip install gdal==3.0.2 +``` ### 2. Setup required config for API @@ -44,7 +52,9 @@ Setup necessary config for API from [docs/CONFIG.DOC](https://github.com/hotosm/ ### 3. Run server -```uvicorn API.main:app --reload``` +``` +uvicorn API.main:app --reload +``` ### 4. Check Redis server @@ -52,7 +62,9 @@ Check redis is running on your machine Login to redis cli -```redis-cli``` +``` +redis-cli +``` Hit ```ping``` it should return pong @@ -61,22 +73,33 @@ If REDIS is not running check out its [documentation](https://redis.io/docs/gett ### 5. Start Celery Worker You should be able to start [celery](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html#running-the-celery-worker-server) worker by running following command on different shell -```celery --app API.api_worker worker --loglevel=INFO``` +``` +celery --app API.api_worker worker --loglevel=INFO +``` ### 6 . [OPTIONAL] Start flower for monitoring queue API uses flower for monitoring the Celery distributed queue. Run this command on different shell -```celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/``` +``` +celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ +``` ### 7. 
Navigate to Fast API Docs to get details about API Endpoint After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) URL on your browser -```http://127.0.0.1:8000/latest/docs``` +``` +http://127.0.0.1:8000/latest/docs +``` Flower dashboard should be available on 5550 localhost port. +``` +http://127.0.0.1:5550/ +``` + +## Check API Installation ### Check Authetication 1. Hit /auth/login/ @@ -100,18 +123,24 @@ Repeat the steps to get a new access_token. Galaxy-API uses pytest for tests ,Navigate to root Dir, Install package in editable mode -```pip install -e .``` +``` +pip install -e . +``` Make sure you have postgresql installed locally with postgis extension enabled , Now Run Pytest -```py.test -v -s``` +``` +py.test -v -s +``` Run Individual tests -```py.test -k test function name``` +``` +py.test -k test function name +``` # Galaxy Package @@ -119,7 +148,9 @@ Run Individual tests ## Local Install -```python setup.py install``` +``` +python setup.py install +``` Now import as : From 5b36c9926d02f08c20951be1b14ee30c51965683 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:14:11 +0545 Subject: [PATCH 021/153] Update GETTING_STARTED_WITH_DOCKER.md --- docs/GETTING_STARTED_WITH_DOCKER.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 8a83b5fc..66e42221 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -22,4 +22,4 @@ http://127.0.0.1:8000/latest/docs http://127.0.0.1:8000/5550/ ``` -Now follow Readme.md +Now, Continue Readme. Check installation from [here](https://github.com/hotosm/galaxy-api/blob/feature/celery/README.md#check-api-installation) From 2f161f2829202de0a875097b8903beed67f61462 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:18:51 +0545 Subject: [PATCH 022/153] Update README.md --- README.md | 6 ++---- 1 file changed, 2 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index 6b2958e6..aa6e4297 100644 --- a/README.md +++ b/README.md @@ -9,12 +9,10 @@ To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.c ### 1. Install requirements. -Install gdal on your machine , for example on Ubuntu +Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu ``` -sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable -sudo apt-get update -sudo apt-get install gdal-bin libgdal-dev +apt-get update && apt-get -y upgrade && apt-get -y install gdal-bin python3-gdal && apt-get -y autoremove && apt-get clean ``` Install [redis](https://redis.io/docs/getting-started/installation/) on your system From 4354fa0833c249f95a82f802b3a3e7f420801611 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:20:17 +0545 Subject: [PATCH 023/153] Update README.md --- README.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/README.md b/README.md index aa6e4297..abf271d8 100644 --- a/README.md +++ b/README.md @@ -46,6 +46,8 @@ pip install gdal==3.0.2 ### 2. Setup required config for API +Make sure you have https://www.postgresql.org/ setup in your machine. + Setup necessary config for API from [docs/CONFIG.DOC](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) ### 3. 
Run server From 7af65490345c85d8e5461d642897deb55e286aa4 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 18:21:46 +0545 Subject: [PATCH 024/153] Update CONFIG_DOC.md --- docs/CONFIG_DOC.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 3e06f39c..e75a281b 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,4 +1,6 @@ +Before getting started on config Make sure you have https://www.postgresql.org/ setup in your machine. + ### 1. Create ```config.txt``` inside src directory. ![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) From aad88700a02019b525afe9f8fb6937471230e4f1 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 18:25:57 +0545 Subject: [PATCH 025/153] removed config sample unnecessary text and moved instructions to config_doc --- src/config.txt.sample | 44 +++++++------------------------------------ 1 file changed, 7 insertions(+), 37 deletions(-) diff --git a/src/config.txt.sample b/src/config.txt.sample index ef0a48bb..37f3b061 100644 --- a/src/config.txt.sample +++ b/src/config.txt.sample @@ -1,7 +1,3 @@ -############# -# MANDATORY # -############# - [INSIGHTS] host=localhost user=postgres @@ -23,6 +19,13 @@ password=admin database=raw port=5432 +[TM] +host=localhost +user=postgres +password=admin +database=tm +port=5432 + [OAUTH] client_id= client_secret= @@ -33,36 +36,3 @@ secret_key=PutSomethingRandmHere [API_CONFIG] env=dev - -############# -# OPTIONNAL # -############# - -# If enable this [API_CONFIG] section, remove the previous one -#[API_CONFIG] -#export_path=exports/ # used to store export path -#api_host=http://127.0.0.1 -#api_port=8000 -#max_area=100000 # max area to support for rawdata input -#use_connection_pooling=True -#log_level=info #options are info,debug,warning,error -#env=dev # default is prod , supported values are dev and prod -#shp_limit=6000 # in mb default is 4096 - -#[EXPORT_UPLOAD] -#FILE_UPLOAD_METHOD=disk # options are s3,disk -#AWS_ACCESS_KEY_ID= your id -#AWS_SECRET_ACCESS_KEY= yourkey -#BUCKET_NAME= your bucket name - -#[TM] -#host=localhost -#user=postgres -#password=admin -#database=tm -#port=5432 - -#[CELERY] -#CELERY_BROKER_URL=redis://localhost:6379 -#CELERY_RESULT_BACKEND=redis://localhost:6379 - From b5f99c64512f24e2577b219d7030b78ca9c4e946 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 18:45:21 +0545 Subject: [PATCH 026/153] changed workflow for unit test with postgis image --- .github/workflows/Unit-Test.yml | 28 +++++++--------------------- 1 file changed, 7 insertions(+), 21 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index ead3b36f..025f4578 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -13,6 +13,11 @@ jobs: deploy: runs-on: ubuntu-latest + services: + -image: postgis/postgis:14-3.3 + env: + POSTGRES_PASSWORD: admin + POSTGRES_DB: postgres steps: - uses: actions/checkout@v2 @@ -20,28 +25,9 @@ jobs: uses: actions/setup-python@v1 with: python-version: 3.8 - - name: Check postgresql version + - name: Install gdal run: | - psql -V - - name: Remove postgresql version 14 - run: | - sudo apt-get --purge remove postgresql - sudo apt-get purge postgresql* - sudo apt-get --purge remove postgresql postgresql-doc postgresql-common - - name: Set up postgresql 12 - uses: harmon758/postgresql-action@v1 - with: - postgresql 
version: '12' - - name: Install Postgis 3 for Psql 12 - run: | - sudo apt-get update - sudo apt install postgis postgresql-12-postgis-3 - - name: Install gdal - run: | - sudo apt-add-repository ppa:ubuntugis/ubuntugis-unstable - sudo apt-get update - sudo apt-get install gdal-bin libgdal-dev - + apt-get update && apt-get -y upgrade && apt-get -y install gdal-bin python3-gdal && apt-get -y autoremove && apt-get clean - name: Install Dependencies run: | python -m pip install --upgrade pip From e2b8a74a402e5349031eab301e86987e2cfdf8de Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 18:48:01 +0545 Subject: [PATCH 027/153] fixed yaml indent error --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 025f4578..37775b2f 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -27,7 +27,7 @@ jobs: python-version: 3.8 - name: Install gdal run: | - apt-get update && apt-get -y upgrade && apt-get -y install gdal-bin python3-gdal && apt-get -y autoremove && apt-get clean + apt-get update && apt-get -y upgrade && apt-get -y install gdal-bin python3-gdal && apt-get -y autoremove && apt-get clean - name: Install Dependencies run: | python -m pip install --upgrade pip From 00dcf8a0178950ab1efa49ffda86bee51db0cf8e Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 18:49:20 +0545 Subject: [PATCH 028/153] indent fix --- .github/workflows/Unit-Test.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 37775b2f..90b9b120 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -16,8 +16,8 @@ jobs: services: -image: postgis/postgis:14-3.3 env: - POSTGRES_PASSWORD: admin - POSTGRES_DB: postgres + POSTGRES_PASSWORD: admin + POSTGRES_DB: postgres steps: - uses: actions/checkout@v2 From 7a4583878ca5db7850d2a349532abca087375754 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 18:50:32 +0545 Subject: [PATCH 029/153] added postgres --- .github/workflows/Unit-Test.yml | 6 +++++- 1 file changed, 5 insertions(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 90b9b120..92bdc334 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -14,10 +14,14 @@ jobs: runs-on: ubuntu-latest services: - -image: postgis/postgis:14-3.3 + postgres: + image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin POSTGRES_DB: postgres + ports: + - 5432:5432 + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 steps: - uses: actions/checkout@v2 From be3988dc8fbbd5e3fb0d1b11d6b56dc887400b42 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 18:52:04 +0545 Subject: [PATCH 030/153] added sudo command --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 92bdc334..f9758914 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -31,7 +31,7 @@ jobs: python-version: 3.8 - name: Install gdal run: | - apt-get update && apt-get -y upgrade && apt-get -y install gdal-bin python3-gdal && apt-get -y autoremove && apt-get clean + sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && 
sudo apt-get -y autoremove && sudo apt-get clean - name: Install Dependencies run: | python -m pip install --upgrade pip From 7c430acc5c4de2f79670ee70d77f858fce3a6b6c Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 19:20:52 +0545 Subject: [PATCH 031/153] tet create database option in github action --- .github/workflows/Unit-Test.yml | 5 ++++- 1 file changed, 4 insertions(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index f9758914..31dc34fd 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -21,7 +21,7 @@ jobs: POSTGRES_DB: postgres ports: - 5432:5432 - options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 5 + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 steps: - uses: actions/checkout@v2 @@ -32,6 +32,9 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + - name: Create Databases + run: | + PGPASSWORD=admin psql -U postgres -tc "CREATE DATABASE underpass;" - name: Install Dependencies run: | python -m pip install --upgrade pip From 9a990d90cf57e9426dd06da767d8e4a1067ee831 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 19:27:35 +0545 Subject: [PATCH 032/153] check to insert data --- .github/workflows/Unit-Test.yml | 5 ++++- README.md | 5 +++-- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 31dc34fd..e6c2727d 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -31,7 +31,10 @@ jobs: python-version: 3.8 - name: Install gdal run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get cleansudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + - name: Insert sample data + run : | + psql -U postgres -h localhost insights < /tests/src/fixtures/insights.sql - name: Create Databases run: | PGPASSWORD=admin psql -U postgres -tc "CREATE DATABASE underpass;" diff --git a/README.md b/README.md index abf271d8..485d7481 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,8 @@ To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.c Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu ``` -apt-get update && apt-get -y upgrade && apt-get -y install gdal-bin python3-gdal && apt-get -y autoremove && apt-get clean +sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get cleansudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + ``` Install [redis](https://redis.io/docs/getting-started/installation/) on your system @@ -99,7 +100,7 @@ Flower dashboard should be available on 5550 localhost port. http://127.0.0.1:5550/ ``` -## Check API Installation +## Check API Installation ### Check Authetication 1. 
Hit /auth/login/ From 72e6a845e836afd2bbaacf7d5524630a1866ae8a Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 19:29:26 +0545 Subject: [PATCH 033/153] Update CONFIG_DOC.md --- docs/CONFIG_DOC.md | 45 +++++++++++++++++++++++++++++++++++---------- 1 file changed, 35 insertions(+), 10 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index e75a281b..f8673d72 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -7,18 +7,33 @@ Before getting started on config Make sure you have https://www.postgresql.org/ ### 2. Setup Underpass - Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from ```/tests/src/fixtures/underpass.sql ``` + Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from +``` +/tests/src/fixtures/underpass.sql +``` -```psql -U postgres -h localhost underpass < underpass.sql``` +``` +psql -U postgres -h localhost underpass < underpass.sql +``` ### 3. Setup Insights -Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from ```/tests/src/fixtures/insights.sql ``` +Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from +``` +/tests/src/fixtures/insights.sql +``` -```psql -U postgres -h localhost insights < insights.sql``` +``` +psql -U postgres -h localhost insights < insights.sql +``` ### 4. Setup Raw Data -Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from ```/tests/src/fixtures/raw_data.sql ``` +Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from +``` +/tests/src/fixtures/raw_data.sql +``` -```psql -U postgres -h localhost raw < raw_data.sql``` +``` +psql -U postgres -h localhost raw < raw_data.sql +``` ### 5. Setup Oauth @@ -28,7 +43,9 @@ Login to [OSM](https://www.openstreetmap.org/) , Click on My Settings and regist Check on read user preferences and Enter redirect URI as following -```http://127.0.0.1:8000/latest/auth/callback/``` +``` +http://127.0.0.1:8000/latest/auth/callback/ +``` Grab Client ID and Client Secret and put it inside config.txt as OAUTH Block , you can generate secret key for your application by yourself @@ -108,9 +125,15 @@ CELERY_RESULT_BACKEND=redis://localhost:6379 ##### Setup Tasking Manager Database for TM related development Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager/blob/develop/docs/developers/development-setup.md#backend) OR Create database "tm" in your local postgres and insert sample dump from [TM test dump](https://github.com/hotosm/tasking-manager/blob/develop/tests/database/tasking-manager.sql). 
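Once the `tm` database is restored (the `wget`/`psql` steps follow just below), the `/mapathon/detail/` check mentioned at the end of this doc can also be scripted. A hypothetical sketch, not repository code; the `/latest` prefix and the POST verb are assumptions based on the versioned docs URL used elsewhere in these docs:

```python
# Hypothetical smoke test for the mapathon detail endpoint; adjust the base URL
# and version prefix to match your deployment.
import requests

payload = {
    "fromTimestamp": "2019-04-08 10:00:00.000000",
    "toTimestamp": "2019-04-08 11:00:00.000000",
    "projectIds": [1],
    "hashtags": [],
}
resp = requests.post("http://127.0.0.1:8000/latest/mapathon/detail/", json=payload)
print(resp.status_code, resp.json())
```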
-(```wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql```) -```psql -U postgres -h localhost tm < tasking-manager.sql``` +``` +wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql +``` + + +``` +psql -U postgres -h localhost tm < tasking-manager.sql +``` Add those block to config.txt with the value you use in the tasking manager configuration. ``` @@ -123,5 +146,7 @@ port=5432 ``` You can test it later after running server with the `/mapathon/detail/` endpoint and with the following input: -`{"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]}` +` +{"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} +` From ebf5630c3cebe0f221d00103314026a6f4cb16fa Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 19:32:26 +0545 Subject: [PATCH 034/153] removed auto remove --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index e6c2727d..1b9fdcd0 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -31,7 +31,7 @@ jobs: python-version: 3.8 - name: Install gdal run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get cleansudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get cleansudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal - name: Insert sample data run : | psql -U postgres -h localhost insights < /tests/src/fixtures/insights.sql From 3c782a0c00e6cb20316548dbea5a70fe73711696 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 19:33:10 +0545 Subject: [PATCH 035/153] removed typo --- .github/workflows/Unit-Test.yml | 2 +- README.md | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 1b9fdcd0..1bb865bc 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -31,7 +31,7 @@ jobs: python-version: 3.8 - name: Install gdal run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get cleansudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal + sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Insert sample data run : | psql -U postgres -h localhost insights < /tests/src/fixtures/insights.sql diff --git a/README.md b/README.md index 485d7481..8c6a742a 100644 --- a/README.md +++ b/README.md @@ -12,7 +12,7 @@ To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.c Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu ``` -sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get cleansudo apt-get update && sudo apt-get -y 
upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean +sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean ``` Install [redis](https://redis.io/docs/getting-started/installation/) on your system From c9c503974ebeb34eb1d0469841997ae643b987ef Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 19:46:20 +0545 Subject: [PATCH 036/153] check for file path --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 1bb865bc..c32ed77e 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -34,7 +34,7 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Insert sample data run : | - psql -U postgres -h localhost insights < /tests/src/fixtures/insights.sql + psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - name: Create Databases run: | PGPASSWORD=admin psql -U postgres -tc "CREATE DATABASE underpass;" From 31f681b6f06c601fbcfb3e168f8b22e7f787daee Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:08:32 +0545 Subject: [PATCH 037/153] rerun with filepath --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index c32ed77e..1dbd84e7 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -35,7 +35,7 @@ jobs: - name: Insert sample data run : | psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - - name: Create Databases + - name: Create Database run: | PGPASSWORD=admin psql -U postgres -tc "CREATE DATABASE underpass;" - name: Install Dependencies From d1738835e89cc31eae43a7bb2459385b96874625 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:15:59 +0545 Subject: [PATCH 038/153] added password --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 1dbd84e7..7f83a3a3 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -34,7 +34,7 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Insert sample data run : | - psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql + PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - name: Create Database run: | PGPASSWORD=admin psql -U postgres -tc "CREATE DATABASE underpass;" From 15c6a510254ccb375b5b68816760f9f1c7319a91 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:20:39 +0545 Subject: [PATCH 039/153] added host information --- .github/workflows/Unit-Test.yml | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 7f83a3a3..95effc00 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -32,12 +32,12 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get 
-y autoremove && sudo apt-get clean + - name: Create Database + run: | + PGPASSWORD=admin psql -U postgres -h localhost -tc "CREATE DATABASE underpass;" - name: Insert sample data run : | PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - - name: Create Database - run: | - PGPASSWORD=admin psql -U postgres -tc "CREATE DATABASE underpass;" - name: Install Dependencies run: | python -m pip install --upgrade pip From eea21060dce314c4cbe1eae5605e48780cb1f1cd Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:38:13 +0545 Subject: [PATCH 040/153] added -c option --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 95effc00..964dbb0a 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -34,7 +34,7 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Create Database run: | - PGPASSWORD=admin psql -U postgres -h localhost -tc "CREATE DATABASE underpass;" + PGPASSWORD=admin psql -U postgres -h localhost -c "CREATE DATABASE underpass;" - name: Insert sample data run : | PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql From 1d5dd9c7b3766f2b4e3b9b62c8b72dc5d4b52dfe Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:45:25 +0545 Subject: [PATCH 041/153] added config section and added insight only --- .github/workflows/Unit-Test.yml | 9 +++++---- 1 file changed, 5 insertions(+), 4 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 964dbb0a..372cecbf 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -18,7 +18,7 @@ jobs: image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin - POSTGRES_DB: postgres + POSTGRES_DB: insights ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 @@ -32,9 +32,7 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Create Database - run: | - PGPASSWORD=admin psql -U postgres -h localhost -c "CREATE DATABASE underpass;" + - name: Insert sample data run : | PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql @@ -43,6 +41,9 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt pip install -e . 
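The step added just below copies `src/config.txt.sample` to `src/config.txt` before the tests run. That matters because of the guard added to `src/galaxy/config.py` earlier in this series: importing the package aborts when the file is missing. A sketch of the intended behaviour (note that `raise` with a bare string is a `TypeError` on Python 3, so an exception instance such as `FileNotFoundError` is what actually carries the message):

```python
# Sketch of the config-file guard; illustrative, not the exact repository code.
import os

CONFIG_FILE_PATH = "src/config.txt"

if not os.path.exists(CONFIG_FILE_PATH):
    # An exception *instance* is required here; `raise "some string"` fails with
    # "TypeError: exceptions must derive from BaseException" on Python 3.
    raise FileNotFoundError(f"Config file does not exist : {CONFIG_FILE_PATH}")
```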
+ -name: Creating config.txt + run: | + mv src/config.txt.sample src/config.txt - name: Run Tests run: | py.test -v -s From 499bb3fe66ba44f06ecc0b4a8fcd0c9a701723e0 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:47:14 +0545 Subject: [PATCH 042/153] removed space --- .github/workflows/Unit-Test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 372cecbf..c08aa597 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -32,7 +32,6 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Insert sample data run : | PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql From c8e937a8013e839fb210efe4f8538850f0c4a87b Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:57:00 +0545 Subject: [PATCH 043/153] testing workflow --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index c08aa597..de5b3978 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -32,7 +32,7 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Insert sample data + - name: Insert sample db data run : | PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - name: Install Dependencies From 6a7040ae1a7ba8463d2508085827b1d714a24972 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:58:35 +0545 Subject: [PATCH 044/153] removed space --- .github/workflows/Unit-Test.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index de5b3978..a63c0ffe 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -22,7 +22,6 @@ jobs: ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 - steps: - uses: actions/checkout@v2 - name: Set up Python 3.8 From a2ceecd0194ba12f80d532a68840d5b8bc95701d Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 20:59:57 +0545 Subject: [PATCH 045/153] fixed yml error --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index a63c0ffe..68f8b2ff 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -39,7 +39,7 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt pip install -e . 
- -name: Creating config.txt + - name: Creating config.txt run: | mv src/config.txt.sample src/config.txt - name: Run Tests From 340766a3b712d159f48aadd502672c1d23d11ba0 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:08:08 +0545 Subject: [PATCH 046/153] test --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 68f8b2ff..17522952 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -33,7 +33,7 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Insert sample db data run : | - PGPASSWORD=admin psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql + PGPASSWORD=admin psql -U postgres -h localhost xyz < tests/src/fixtures/insights.sql - name: Install Dependencies run: | python -m pip install --upgrade pip From 5b7c96d71a55e50657e9d5f9d9ed1e96808ae808 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:12:38 +0545 Subject: [PATCH 047/153] added password export option --- .github/workflows/Unit-Test.yml | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 17522952..87148bee 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -33,7 +33,8 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Insert sample db data run : | - PGPASSWORD=admin psql -U postgres -h localhost xyz < tests/src/fixtures/insights.sql + export PGPASSWORD='password'; + psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - name: Install Dependencies run: | python -m pip install --upgrade pip From 4a6a78c5c90ac6a4bf0bf81e7525afbed7130b7b Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:12:55 +0545 Subject: [PATCH 048/153] changed password --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 87148bee..4d767d0f 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -33,7 +33,7 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Insert sample db data run : | - export PGPASSWORD='password'; + export PGPASSWORD='admin'; psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - name: Install Dependencies run: | From ed97604c8a298bb9808b7e5b676fe73e74e4b174 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:20:59 +0545 Subject: [PATCH 049/153] added db add data --- .github/workflows/Unit-Test.yml | 14 ++++++++++++-- 1 file changed, 12 insertions(+), 2 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 4d767d0f..21294794 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -18,7 +18,7 @@ jobs: image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin - POSTGRES_DB: insights + POSTGRES_DB: postgres ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 @@ -31,10 +31,20 @@ jobs: - name: Install 
gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Insert sample db data + - name: Create Databases run : | export PGPASSWORD='admin'; + psql -U postgres -h localhost -c "CREATE DATABASE insights;" + psql -U postgres -h localhost -c "CREATE DATABASE underpass;" + psql -U postgres -h localhost -c "CREATE DATABASE tm;" + psql -U postgres -h localhost -c "CREATE DATABASE raw;" + + - name: Insert sample db data + run : | psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql + psql -U postgres -h localhost raw < tests/src/fixtures/raw_data.sql + psql -U postgres -h localhost underpass < tests/src/fixtures/underpass.sql + - name: Install Dependencies run: | python -m pip install --upgrade pip From 64c454484f073f0c3538441766cbf7d31235e300 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:25:18 +0545 Subject: [PATCH 050/153] reverted --- .github/workflows/Unit-Test.yml | 13 ++----------- 1 file changed, 2 insertions(+), 11 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 21294794..e8497cec 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -18,7 +18,7 @@ jobs: image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin - POSTGRES_DB: postgres + POSTGRES_DB: insights ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 @@ -31,19 +31,10 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Create Databases - run : | - export PGPASSWORD='admin'; - psql -U postgres -h localhost -c "CREATE DATABASE insights;" - psql -U postgres -h localhost -c "CREATE DATABASE underpass;" - psql -U postgres -h localhost -c "CREATE DATABASE tm;" - psql -U postgres -h localhost -c "CREATE DATABASE raw;" - - name: Insert sample db data run : | + export PGPASSWORD='admin'; psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - psql -U postgres -h localhost raw < tests/src/fixtures/raw_data.sql - psql -U postgres -h localhost underpass < tests/src/fixtures/underpass.sql - name: Install Dependencies run: | From 93bd4533474c907a014f7f19e31cd04a230ada19 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:33:26 +0545 Subject: [PATCH 051/153] droped idea of db insert --- .github/workflows/Unit-Test.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index e8497cec..cb441c8a 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -18,7 +18,7 @@ jobs: image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin - POSTGRES_DB: insights + POSTGRES_DB: postgres ports: - 5432:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 @@ -31,10 +31,6 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Insert sample db data - run : | - export PGPASSWORD='admin'; - psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - name: Install Dependencies run: | @@ -46,4 +42,4 @@ jobs: mv src/config.txt.sample src/config.txt - name: Run Tests 
run: | - py.test -v -s + py.test -v -s \ No newline at end of file From 4c92ca5cfc48019e7eb78750fc68e29c56fd6abc Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:42:25 +0545 Subject: [PATCH 052/153] changed to psql 12 --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index cb441c8a..2697b299 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -15,7 +15,7 @@ jobs: ubuntu-latest services: postgres: - image: postgis/postgis:14-3.3 + image: postgis/postgis:12-3.3 env: POSTGRES_PASSWORD: admin POSTGRES_DB: postgres From c63e9744766a12751ffadae58bb1f030ee7197ce Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 21:52:02 +0545 Subject: [PATCH 053/153] added psql 14 postgis --- .github/workflows/Unit-Test.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 2697b299..e9df702b 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -20,7 +20,7 @@ jobs: POSTGRES_PASSWORD: admin POSTGRES_DB: postgres ports: - - 5432:5432 + - 5434:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 steps: - uses: actions/checkout@v2 @@ -31,7 +31,9 @@ jobs: - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - + - name: Install Postgis Extension + run: | + sudo apt install postgis postgresql-14-postgis-3 - name: Install Dependencies run: | python -m pip install --upgrade pip From cd697cee19c307340c7260af1a6360d8a1ea6df3 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:04:15 +0545 Subject: [PATCH 054/153] changed scripts --- .github/workflows/Unit-Test.yml | 5 +++-- 1 file changed, 3 insertions(+), 2 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index e9df702b..bb57ac1c 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -15,7 +15,7 @@ jobs: ubuntu-latest services: postgres: - image: postgis/postgis:12-3.3 + image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin POSTGRES_DB: postgres @@ -33,7 +33,8 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Install Postgis Extension run: | - sudo apt install postgis postgresql-14-postgis-3 + sudo apt update && sudo apt -y install postgresql-14-postgis-3-scripts + - name: Install Dependencies run: | python -m pip install --upgrade pip From f9538a0d658803129cfd84d64b83c1fa6ccb7e7e Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:12:54 +0545 Subject: [PATCH 055/153] final test for psql 14 --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index bb57ac1c..7145cba3 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -33,7 +33,7 @@ jobs: sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Install Postgis Extension run: | - sudo apt update && sudo apt -y install postgresql-14-postgis-3-scripts + sudo 
apt -y install postgresql-14-postgis-3 - name: Install Dependencies run: | From 97498fff2d44f263419aaac1f14b1d78b755558d Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:16:33 +0545 Subject: [PATCH 056/153] check psql version --- .github/workflows/Unit-Test.yml | 6 ++++++ 1 file changed, 6 insertions(+) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 7145cba3..c81aed2c 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -28,9 +28,15 @@ jobs: uses: actions/setup-python@v1 with: python-version: 3.8 + - name: Check postgresql version 1 + run: | + psql -V - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + - name: Check postgresql version 2 + run: | + psql -V - name: Install Postgis Extension run: | sudo apt -y install postgresql-14-postgis-3 From aae75722ee4aed9706800190f6a4264cde4aee90 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:21:52 +0545 Subject: [PATCH 057/153] apt install --- .github/workflows/Unit-Test.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index c81aed2c..53601675 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -39,7 +39,7 @@ jobs: psql -V - name: Install Postgis Extension run: | - sudo apt -y install postgresql-14-postgis-3 + sudo apt install postgresql-14-postgis-3 - name: Install Dependencies run: | From a285edff70b931aadb5f18e4873b93eedf1772b4 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:25:14 +0545 Subject: [PATCH 058/153] changed postgis command --- .github/workflows/Unit-Test.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 53601675..9bc5bdfd 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -28,18 +28,16 @@ jobs: uses: actions/setup-python@v1 with: python-version: 3.8 - - name: Check postgresql version 1 + - name: Check postgresql version run: | psql -V - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - name: Check postgresql version 2 - run: | - psql -V + - name: Install Postgis Extension run: | - sudo apt install postgresql-14-postgis-3 + sudo apt install postgis - name: Install Dependencies run: | From e916c5baf5a844a98ca95862159302c09e656509 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:31:08 +0545 Subject: [PATCH 059/153] opted old method --- .github/workflows/Unit-Test.yml | 17 +++++++++++------ 1 file changed, 11 insertions(+), 6 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 9bc5bdfd..68e9ac8a 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -31,14 +31,19 @@ jobs: - name: Check postgresql version run: | psql -V - - name: Install gdal + - name: Remove postgresql version 14 run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - - name: Install Postgis Extension + sudo apt-get --purge remove postgresql + sudo apt-get purge postgresql* + sudo apt-get --purge remove postgresql 
postgresql-doc postgresql-common + - name: Set up postgresql 12 + uses: harmon758/postgresql-action@v1 + with: + postgresql version: '12' + - name: Install Postgis 3 for Psql 12 run: | - sudo apt install postgis - + sudo apt-get update + sudo apt install postgis postgresql-12-postgis-3 - name: Install Dependencies run: | python -m pip install --upgrade pip From 7501bb4f5612f3ae72800effdb28a087448d1c4b Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:36:00 +0545 Subject: [PATCH 060/153] final test --- .github/workflows/Unit-Test.yml | 17 ++++++++++++++++- 1 file changed, 16 insertions(+), 1 deletion(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 68e9ac8a..bbb9e091 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -18,7 +18,7 @@ jobs: image: postgis/postgis:14-3.3 env: POSTGRES_PASSWORD: admin - POSTGRES_DB: postgres + POSTGRES_DB: insights ports: - 5434:5432 options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 @@ -44,6 +44,21 @@ jobs: run: | sudo apt-get update sudo apt install postgis postgresql-12-postgis-3 + + - name: Create Databases + run : | + export PGPASSWORD='admin'; + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE insights;" + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE underpass;" + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE tm;" + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE raw;" + + - name: Insert sample db data + run : | + psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql + psql -U postgres -h localhost raw < tests/src/fixtures/raw_data.sql + psql -U postgres -h localhost underpass < tests/src/fixtures/underpass.sql + - name: Install Dependencies run: | python -m pip install --upgrade pip From c882905127210554ed5940bfdbff106cfdb83b78 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:41:43 +0545 Subject: [PATCH 061/153] setup test with the database --- .github/workflows/Unit-Test.yml | 8 +++----- 1 file changed, 3 insertions(+), 5 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index bbb9e091..49fd8576 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -28,10 +28,7 @@ jobs: uses: actions/setup-python@v1 with: python-version: 3.8 - - name: Check postgresql version - run: | - psql -V - - name: Remove postgresql version 14 + - name: Clean up PSQL run: | sudo apt-get --purge remove postgresql sudo apt-get purge postgresql* @@ -48,7 +45,6 @@ jobs: - name: Create Databases run : | export PGPASSWORD='admin'; - psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE insights;" psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE underpass;" psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE tm;" psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE raw;" @@ -58,6 +54,8 @@ jobs: psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql psql -U postgres -h localhost raw < tests/src/fixtures/raw_data.sql psql -U postgres -h localhost underpass < tests/src/fixtures/underpass.sql + wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql + psql -U postgres -h localhost tm < tasking-manager.sql - name: Install Dependencies run: | From 3a66d87269191f0ff68518b8671dd4cfd79f7d9b Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:44:13 +0545 Subject: [PATCH 062/153] added 
port info --- .github/workflows/Unit-Test.yml | 8 ++++---- 1 file changed, 4 insertions(+), 4 deletions(-) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 49fd8576..4f578ff2 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -51,11 +51,11 @@ jobs: - name: Insert sample db data run : | - psql -U postgres -h localhost insights < tests/src/fixtures/insights.sql - psql -U postgres -h localhost raw < tests/src/fixtures/raw_data.sql - psql -U postgres -h localhost underpass < tests/src/fixtures/underpass.sql + psql -U postgres -h localhost -p 5434 insights < tests/src/fixtures/insights.sql + psql -U postgres -h localhost -p 5434 raw < tests/src/fixtures/raw_data.sql + psql -U postgres -h localhost -p 5434 underpass < tests/src/fixtures/underpass.sql wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql - psql -U postgres -h localhost tm < tasking-manager.sql + psql -U postgres -h localhost -p 5434 tm < tasking-manager.sql - name: Install Dependencies run: | From b09f7272e16dfbb9d82bbd3e294ce725884abec7 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:49:31 +0545 Subject: [PATCH 063/153] exported password --- .github/workflows/Unit-Test.yml | 1 + 1 file changed, 1 insertion(+) diff --git a/.github/workflows/Unit-Test.yml b/.github/workflows/Unit-Test.yml index 4f578ff2..d10314eb 100644 --- a/.github/workflows/Unit-Test.yml +++ b/.github/workflows/Unit-Test.yml @@ -51,6 +51,7 @@ jobs: - name: Insert sample db data run : | + export PGPASSWORD='admin'; psql -U postgres -h localhost -p 5434 insights < tests/src/fixtures/insights.sql psql -U postgres -h localhost -p 5434 raw < tests/src/fixtures/raw_data.sql psql -U postgres -h localhost -p 5434 underpass < tests/src/fixtures/underpass.sql From 5e2dd878a53e8e080fcd5738bad7656141646c3d Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 22:53:10 +0545 Subject: [PATCH 064/153] binded port of redis --- docker-compose.yml | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docker-compose.yml b/docker-compose.yml index c8a6a2ab..c9c79d90 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -23,6 +23,8 @@ services: redis: image: redis:6-alpine + ports: + - "6379:6379" worker-dashboard: build: . From 338f5573f76e5d6323e318fb64ca9e747904fa1b Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 23:14:56 +0545 Subject: [PATCH 065/153] updated doc for the docker compose --- docs/CONFIG_DOC.md | 16 ++++++++-------- 1 file changed, 8 insertions(+), 8 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index f8673d72..0ac7b890 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -7,7 +7,7 @@ Before getting started on config Make sure you have https://www.postgresql.org/ ### 2. Setup Underpass - Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from + Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from ``` /tests/src/fixtures/underpass.sql ``` @@ -16,9 +16,9 @@ Before getting started on config Make sure you have https://www.postgresql.org/ psql -U postgres -h localhost underpass < underpass.sql ``` ### 3. 
Setup Insights -Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from +Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from ``` -/tests/src/fixtures/insights.sql +/tests/src/fixtures/insights.sql ``` ``` @@ -26,9 +26,9 @@ psql -U postgres -h localhost insights < insights.sql ``` ### 4. Setup Raw Data -Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from +Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from ``` -/tests/src/fixtures/raw_data.sql +/tests/src/fixtures/raw_data.sql ``` ``` @@ -115,12 +115,12 @@ BUCKET_NAME= your bucket name Celery Configuration options: -Galaxy API uses Celery 5 and Redis for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration +Galaxy API uses Celery 5 and Redis for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration , for the local setup Broker URL could be redis://localhost:6379/0 , for the current docker compose use following ``` [CELERY] -CELERY_BROKER_URL=redis://localhost:6379 -CELERY_RESULT_BACKEND=redis://localhost:6379 +CELERY_BROKER_URL=redis://redis:6379/0 +CELERY_RESULT_BACKEND=redis://redis:6379/0 ``` ##### Setup Tasking Manager Database for TM related development From 5121305bdc65710e495e6a550c8a00c3b8705b22 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 23:15:53 +0545 Subject: [PATCH 066/153] updated sample --- src/config.txt.sample | 4 ++++ 1 file changed, 4 insertions(+) diff --git a/src/config.txt.sample b/src/config.txt.sample index 37f3b061..c4569c57 100644 --- a/src/config.txt.sample +++ b/src/config.txt.sample @@ -36,3 +36,7 @@ secret_key=PutSomethingRandmHere [API_CONFIG] env=dev + +[CELERY] +CELERY_BROKER_URL=redis://redis:6379/0 +CELERY_RESULT_BACKEND=redis://redis:6379/0 \ No newline at end of file From 28946dc6d0ba0b37eee78dd386b29085df91c3d0 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 23:18:50 +0545 Subject: [PATCH 067/153] Update GETTING_STARTED_WITH_DOCKER.md --- docs/GETTING_STARTED_WITH_DOCKER.md | 7 +++++++ 1 file changed, 7 insertions(+) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 66e42221..6843f171 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -18,8 +18,15 @@ Uvicorn should be running on 8000 port , Redis on default port , Celery with a w ``` http://127.0.0.1:8000/latest/docs ``` +API Docs will be displayed like this upon uvicorn successfull server start +![image](https://user-images.githubusercontent.com/36752999/191813795-fdfd46fe-5e6c-4ecf-be9b-f9f351d3d1d7.png) + ``` http://127.0.0.1:8000/5550/ ``` +Flower dashboard will look like this on successfull installation with a worker online +![image](https://user-images.githubusercontent.com/36752999/191813613-3859522b-ea68-4370-87b2-ebd1d8880d80.png) + + Now, Continue Readme. 
Check installation from [here](https://github.com/hotosm/galaxy-api/blob/feature/celery/README.md#check-api-installation) From 3bf13e81530b7b407c7f82fcdbea8201f80fff80 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 23:27:49 +0545 Subject: [PATCH 068/153] Update CONFIG_DOC.md --- docs/CONFIG_DOC.md | 34 ++++++++++++++++++++++++---------- 1 file changed, 24 insertions(+), 10 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 0ac7b890..7e8dcbd2 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,6 +1,7 @@ Before getting started on config Make sure you have https://www.postgresql.org/ setup in your machine. +## Compulsary Configuration ### 1. Create ```config.txt``` inside src directory. ![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) @@ -51,7 +52,7 @@ Grab Client ID and Client Secret and put it inside config.txt as OAUTH Block , y ### 6. Put your credentials inside config.txt -Insert your config blocks with the database credentials where you have underpass ,insight and tm in your database +Insert your config blocks with the database credentials where you have underpass ,insight and rawdata in your database along with oauth block ``` [INSIGHTS] @@ -88,7 +89,28 @@ env=dev ``` -#### Optional Configuration +**Celery Configuration options:** + +Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration , for the local setup Broker URL could be redis://localhost:6379/0 , for the current docker compose use following + +**For local installation :** +``` +[CELERY] +CELERY_BROKER_URL=redis://localhost:6379/0 +CELERY_RESULT_BACKEND=redis://localhost:6379/0 +``` + + +**For Docker :** +``` +[CELERY] +CELERY_BROKER_URL=redis://redis:6379/0 +CELERY_RESULT_BACKEND=redis://redis:6379/0 +``` + +**Tips** : Follow .github/workflows/[unit-test](https://github.com/hotosm/galaxy-api/blob/feature/celery/.github/workflows/unit-test.yml) If you have any confusion on implementation of config file . + +## Optional Configuration [ You can skip this part for basic installation ] You can further customize API if you wish with API_CONFIG Block @@ -113,15 +135,7 @@ AWS_SECRET_ACCESS_KEY= yourkey BUCKET_NAME= your bucket name ``` -Celery Configuration options: - -Galaxy API uses Celery 5 and Redis for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration , for the local setup Broker URL could be redis://localhost:6379/0 , for the current docker compose use following -``` -[CELERY] -CELERY_BROKER_URL=redis://redis:6379/0 -CELERY_RESULT_BACKEND=redis://redis:6379/0 -``` ##### Setup Tasking Manager Database for TM related development Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager/blob/develop/docs/developers/development-setup.md#backend) OR Create database "tm" in your local postgres and insert sample dump from [TM test dump](https://github.com/hotosm/tasking-manager/blob/develop/tests/database/tasking-manager.sql). 
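
The patch above documents the `[CELERY]` block (broker and result-backend URLs) for both local and Docker setups. As a quick sanity check before starting the worker, the broker URL can be read from `src/config.txt` and pinged — a minimal sketch using `configparser` and redis-py; the script name and the fallback URL are illustrative assumptions, not part of the repository:

```
# check_celery_broker.py -- hypothetical helper, not part of galaxy-api
from configparser import ConfigParser

import redis  # redis-py, pulled in alongside celery


def check_broker(config_path="src/config.txt"):
    """Read the [CELERY] block and ping the Redis broker it points to."""
    parser = ConfigParser()
    parser.read(config_path)
    # fallback mirrors the local-setup URL suggested in the docs above
    broker_url = parser.get(
        "CELERY", "CELERY_BROKER_URL", fallback="redis://localhost:6379/0"
    )
    client = redis.Redis.from_url(broker_url)
    client.ping()  # raises redis.exceptions.ConnectionError if unreachable
    print(f"Celery broker reachable at {broker_url}")


if __name__ == "__main__":
    check_broker()
```

If the ping fails under the Docker compose setup, the hostname in the URL (`redis` vs `localhost`) is the usual culprit.
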
From 0b63d6a06fac2e40f8252113b9d000821d6f7ab0 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 23:46:13 +0545 Subject: [PATCH 069/153] Update CONFIG_DOC.md --- docs/CONFIG_DOC.md | 168 ++++++++++++++++++++++++++++++--------------- 1 file changed, 114 insertions(+), 54 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 7e8dcbd2..08de9883 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,5 +1,5 @@ -Before getting started on config Make sure you have https://www.postgresql.org/ setup in your machine. +Before getting started on config Make sure you have [Postgres](https://www.postgresql.org/) and [Postgis](https://postgis.net/) setup in your machine. ## Compulsary Configuration @@ -16,7 +16,17 @@ Before getting started on config Make sure you have https://www.postgresql.org/ ``` psql -U postgres -h localhost underpass < underpass.sql ``` -### 3. Setup Insights +Put your credentials in Underpass block +``` +[UNDERPASS] +host=localhost +user=postgres +password=admin +database=underpass +port=5432 +``` + +### 3. Setup Insights for Historical Data Setup insights from [here](https://github.com/hotosm/insights) OR Create database "insights" in your local postgres and insert sample dump from ``` /tests/src/fixtures/insights.sql @@ -25,8 +35,17 @@ Setup insights from [here](https://github.com/hotosm/insights) OR Create databas ``` psql -U postgres -h localhost insights < insights.sql ``` +Put your credentials in insights block +``` +[INSIGHTS] +host=localhost +user=postgres +password=admin +database=insights +port=5432 +``` -### 4. Setup Raw Data +### 4. Setup Raw Data for Current OSM Snapshot Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/raw) OR Create database "raw" in your local postgres and insert sample dump from ``` /tests/src/fixtures/raw_data.sql @@ -35,9 +54,39 @@ Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/r ``` psql -U postgres -h localhost raw < raw_data.sql ``` +Put your credentials on Rawdata block +``` +[RAW_DATA] +host=localhost +user=postgres +password=admin +database=raw +port=5432 +``` -### 5. Setup Oauth +### 5. Setup Tasking Manager Database for TM related development + +Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager/blob/develop/docs/developers/development-setup.md#backend) OR Create database "tm" in your local postgres and insert sample dump from [TM test dump](https://github.com/hotosm/tasking-manager/blob/develop/tests/database/tasking-manager.sql). + +``` +wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql +``` + +``` +psql -U postgres -h localhost tm < tasking-manager.sql +``` +Put your credentials on TM block +``` +[TM] +host=localhost +user=postgres +password=admin +database=tm +port=5432 +``` + +### 6. 
Setup Oauth for Authentication Login to [OSM](https://www.openstreetmap.org/) , Click on My Settings and register your local galaxy app to Oauth2applications ![image](https://user-images.githubusercontent.com/36752999/188452619-aababf28-b685-4141-b381-9c25d0367b57.png) @@ -50,23 +99,70 @@ http://127.0.0.1:8000/latest/auth/callback/ Grab Client ID and Client Secret and put it inside config.txt as OAUTH Block , you can generate secret key for your application by yourself +``` +[OAUTH] +client_id= your client id +client_secret= your client secret +url=https://www.openstreetmap.org +scope=read_prefs +login_redirect_uri=http://127.0.0.1:8000/latest/auth/callback/ +secret_key=jnfdsjkfndsjkfnsdkjfnskfn +``` + +### 7. Configure celery and redis + +Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration , for the local setup Broker URL could be redis://localhost:6379/0 , for the current docker compose use following + +**For local installation :** +``` +[CELERY] +CELERY_BROKER_URL=redis://localhost:6379/0 +CELERY_RESULT_BACKEND=redis://localhost:6379/0 +``` + +**For Docker :** +``` +[CELERY] +CELERY_BROKER_URL=redis://redis:6379/0 +CELERY_RESULT_BACKEND=redis://redis:6379/0 +``` -### 6. Put your credentials inside config.txt +### 7. Finalizing config.txt Insert your config blocks with the database credentials where you have underpass ,insight and rawdata in your database along with oauth block +Summary of command : + +Considering You have PSQL-POSTGIS setup with user **postgres** host **localhost** on port **5432** as password **admin** + ``` -[INSIGHTS] + export PGPASSWORD='admin'; + psql -U postgres -h localhost -p 5432 -c "CREATE DATABASE underpass;" + psql -U postgres -h localhost -p 5432 -c "CREATE DATABASE tm;" + psql -U postgres -h localhost -p 5432 -c "CREATE DATABASE raw;" + + cd tests/src/fixtures/ + psql -U postgres -h localhost -p 5432 insights < insights.sql + psql -U postgres -h localhost -p 5432 raw < raw_data.sql + psql -U postgres -h localhost -p 5432 underpass < underpass.sql + wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql + psql -U postgres -h localhost -p 5432 tm < tasking-manager.sql +``` + +Your config.txt will look like this + +``` +[UNDERPASS] host=localhost user=postgres password=admin -database=insights +database=underpass port=5432 -[UNDERPASS] +[INSIGHTS] host=localhost user=postgres password=admin -database=underpass +database=insights port=5432 [RAW_DATA] @@ -76,6 +172,13 @@ password=admin database=raw port=5432 +[TM] +host=localhost +user=postgres +password=admin +database=tm +port=5432 + [OAUTH] client_id= your client id client_secret= your client secret @@ -86,26 +189,12 @@ secret_key=jnfdsjkfndsjkfnsdkjfnskfn [API_CONFIG] env=dev +log_level=debug -``` - -**Celery Configuration options:** - -Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 
6379 is the default port , You can change the port according to your configuration , for the local setup Broker URL could be redis://localhost:6379/0 , for the current docker compose use following - -**For local installation :** -``` -[CELERY] -CELERY_BROKER_URL=redis://localhost:6379/0 -CELERY_RESULT_BACKEND=redis://localhost:6379/0 -``` - - -**For Docker :** -``` [CELERY] CELERY_BROKER_URL=redis://redis:6379/0 CELERY_RESULT_BACKEND=redis://redis:6379/0 + ``` **Tips** : Follow .github/workflows/[unit-test](https://github.com/hotosm/galaxy-api/blob/feature/celery/.github/workflows/unit-test.yml) If you have any confusion on implementation of config file . @@ -135,32 +224,3 @@ AWS_SECRET_ACCESS_KEY= yourkey BUCKET_NAME= your bucket name ``` - -##### Setup Tasking Manager Database for TM related development - -Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager/blob/develop/docs/developers/development-setup.md#backend) OR Create database "tm" in your local postgres and insert sample dump from [TM test dump](https://github.com/hotosm/tasking-manager/blob/develop/tests/database/tasking-manager.sql). - -``` -wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql -``` - - -``` -psql -U postgres -h localhost tm < tasking-manager.sql -``` - -Add those block to config.txt with the value you use in the tasking manager configuration. -``` -[TM] -host=localhost -user=postgres -password=admin -database=tm -port=5432 -``` - -You can test it later after running server with the `/mapathon/detail/` endpoint and with the following input: -` -{"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} -` - From 194628eab1c2f6aa17f2e7c27a6f1e4b8038bcb1 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Thu, 22 Sep 2022 23:49:56 +0545 Subject: [PATCH 070/153] Update README.md --- README.md | 49 ++++++++++++++++++++++++++++--------------------- 1 file changed, 28 insertions(+), 21 deletions(-) diff --git a/README.md b/README.md index 8c6a742a..7c81e24c 100644 --- a/README.md +++ b/README.md @@ -9,19 +9,33 @@ To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.c ### 1. Install requirements. -Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu +- Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu ``` sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean ``` -Install [redis](https://redis.io/docs/getting-started/installation/) on your system +- Install [redis](https://redis.io/docs/getting-started/installation/) on your system ``` sudo apt-get install redis ``` -Clone the Repo to your machine +- Check Redis server + +Check redis is running on your machine + +Login to redis cli + +``` +redis-cli +``` + +Hit ```ping``` it should return pong + +If REDIS is not running check out its [documentation](https://redis.io/docs/getting-started/) + +- Clone the Repo to your machine ``` git clone https://github.com/hotosm/galaxy-api.git @@ -33,7 +47,7 @@ Navigate to repo cd galaxy-api ``` -Install python dependencies +- Install python dependencies ``` pip install -r requirements.txt @@ -57,28 +71,14 @@ Setup necessary config for API from [docs/CONFIG.DOC](https://github.com/hotosm/ uvicorn API.main:app --reload ``` -### 4. 
Check Redis server - -Check redis is running on your machine - -Login to redis cli - -``` -redis-cli -``` - -Hit ```ping``` it should return pong - -If REDIS is not running check out its [documentation](https://redis.io/docs/getting-started/) - -### 5. Start Celery Worker +### 4. Start Celery Worker You should be able to start [celery](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html#running-the-celery-worker-server) worker by running following command on different shell ``` celery --app API.api_worker worker --loglevel=INFO ``` -### 6 . [OPTIONAL] Start flower for monitoring queue +### 5 . [OPTIONAL] Start flower for monitoring queue API uses flower for monitoring the Celery distributed queue. Run this command on different shell @@ -86,7 +86,7 @@ API uses flower for monitoring the Celery distributed queue. Run this command on celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ ``` -### 7. Navigate to Fast API Docs to get details about API Endpoint +### 6. Navigate to Fast API Docs to get details about API Endpoint After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) URL on your browser @@ -116,6 +116,13 @@ INSERT INTO users_roles VALUES (ID, 1); Repeat the steps to get a new access_token. +You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication and database connection + +``` +{"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} +``` + + #### API has been setup successfully ! From 4f1037ab3592ec7d27ce63c9dc9eff45d3899ea4 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Thu, 22 Sep 2022 23:58:05 +0545 Subject: [PATCH 071/153] Check if fails or not for worker --- API/api_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API/api_worker.py b/API/api_worker.py index 9a37106e..de7f3b73 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -115,7 +115,7 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): "zip_file_size_bytes": zip_file_size, } except Exception as ex: - self.update_state(state='FAILURE', meta={'exc': ex}) + self.update_state(state='FAILURE', meta={'exc': 'Failed'}) From 88f857a3f1f93a527526c462e0bf5031882bebe6 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 00:55:54 +0545 Subject: [PATCH 072/153] handled error --- API/api_worker.py | 4 ++-- API/raw_data.py | 5 ++--- docs/CONFIG_DOC.md | 26 +++++++++----------------- 3 files changed, 13 insertions(+), 22 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index de7f3b73..61b04256 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -21,7 +21,7 @@ ) # using redis as backend , make sure you have redis server started on your system on port 6379 celery.conf.task_serializer = 'pickle' -celery.conf.result_serializer = 'json' +celery.conf.result_serializer = 'pickle' celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] @@ -115,7 +115,7 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): "zip_file_size_bytes": zip_file_size, } except Exception as ex: - self.update_state(state='FAILURE', meta={'exc': 'Failed'}) + raise ex diff --git a/API/raw_data.py b/API/raw_data.py index 7d4b46d5..35e18814 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -536,11 +536,10 @@ def get_task_status(task_id): """ task_result = AsyncResult(task_id, app=celery) + result = { "id": task_id, "status": task_result.status, - 
"status": task_result.state, - - "result": task_result.result + "result": task_result.result if task_result.status == 'SUCCESS' else None } return JSONResponse(result) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 08de9883..75dd1d87 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,7 +1,7 @@ Before getting started on config Make sure you have [Postgres](https://www.postgresql.org/) and [Postgis](https://postgis.net/) setup in your machine. -## Compulsary Configuration +## Compulsary Configuration ### 1. Create ```config.txt``` inside src directory. ![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) @@ -16,7 +16,7 @@ Before getting started on config Make sure you have [Postgres](https://www.postg ``` psql -U postgres -h localhost underpass < underpass.sql ``` -Put your credentials in Underpass block +Put your credentials in Underpass block ``` [UNDERPASS] host=localhost @@ -35,7 +35,7 @@ Setup insights from [here](https://github.com/hotosm/insights) OR Create databas ``` psql -U postgres -h localhost insights < insights.sql ``` -Put your credentials in insights block +Put your credentials in insights block ``` [INSIGHTS] host=localhost @@ -54,7 +54,7 @@ Initialize rawdata from [here](https://github.com/hotosm/underpass/tree/master/r ``` psql -U postgres -h localhost raw < raw_data.sql ``` -Put your credentials on Rawdata block +Put your credentials on Rawdata block ``` [RAW_DATA] @@ -111,16 +111,8 @@ secret_key=jnfdsjkfndsjkfnsdkjfnskfn ### 7. Configure celery and redis -Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration , for the local setup Broker URL could be redis://localhost:6379/0 , for the current docker compose use following +Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration for the current docker compose use following -**For local installation :** -``` -[CELERY] -CELERY_BROKER_URL=redis://localhost:6379/0 -CELERY_RESULT_BACKEND=redis://localhost:6379/0 -``` - -**For Docker :** ``` [CELERY] CELERY_BROKER_URL=redis://redis:6379/0 @@ -130,7 +122,7 @@ CELERY_RESULT_BACKEND=redis://redis:6379/0 ### 7. 
Finalizing config.txt Insert your config blocks with the database credentials where you have underpass ,insight and rawdata in your database along with oauth block -Summary of command : +Summary of command : Considering You have PSQL-POSTGIS setup with user **postgres** host **localhost** on port **5432** as password **admin** @@ -139,7 +131,7 @@ Considering You have PSQL-POSTGIS setup with user **postgres** host **localhost psql -U postgres -h localhost -p 5432 -c "CREATE DATABASE underpass;" psql -U postgres -h localhost -p 5432 -c "CREATE DATABASE tm;" psql -U postgres -h localhost -p 5432 -c "CREATE DATABASE raw;" - + cd tests/src/fixtures/ psql -U postgres -h localhost -p 5432 insights < insights.sql psql -U postgres -h localhost -p 5432 raw < raw_data.sql @@ -148,7 +140,7 @@ Considering You have PSQL-POSTGIS setup with user **postgres** host **localhost psql -U postgres -h localhost -p 5432 tm < tasking-manager.sql ``` -Your config.txt will look like this +Your config.txt will look like this ``` [UNDERPASS] @@ -199,7 +191,7 @@ CELERY_RESULT_BACKEND=redis://redis:6379/0 **Tips** : Follow .github/workflows/[unit-test](https://github.com/hotosm/galaxy-api/blob/feature/celery/.github/workflows/unit-test.yml) If you have any confusion on implementation of config file . -## Optional Configuration [ You can skip this part for basic installation ] +## Optional Configuration [ You can skip this part for basic installation ] You can further customize API if you wish with API_CONFIG Block From 394c34cd9a7dba0975cd5b3bfa9341bdd837f109 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 00:57:08 +0545 Subject: [PATCH 073/153] formatted worker --- API/api_worker.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 61b04256..8d6cb128 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -25,7 +25,7 @@ celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] -@celery.task(bind=True,name="process_raw_data") +@celery.task(bind=True, name="process_raw_data") def process_raw_data(self, incoming_scheme, incoming_host, params): try: start_time = dt.now() @@ -118,7 +118,6 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): raise ex - def remove_file(path: str) -> None: """Used for removing temp file dir and its all content after zip file is delivered to user""" try: From 59bda45446a99ecc4a43636a30e98bad98eb3e6b Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 09:23:45 +0545 Subject: [PATCH 074/153] added build and separated from unit test --- .github/workflows/build.yml | 64 +++++++++++++++++++++++++++++++++++++ 1 file changed, 64 insertions(+) create mode 100644 .github/workflows/build.yml diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml new file mode 100644 index 00000000..df58e2fb --- /dev/null +++ b/.github/workflows/build.yml @@ -0,0 +1,64 @@ +name: Check Build +on: + push: + branches: + - master + - develop + pull_request: + branches: + - master + - develop + +jobs: + deploy: + runs-on: ubuntu-latest + services: + postgres: + image: postgis/postgis:14-3.3 + env: + POSTGRES_PASSWORD: admin + POSTGRES_DB: insights + ports: + - 5434:5432 + options: --health-cmd pg_isready --health-interval 10s --health-timeout 5s --health-retries 2 + steps: + - uses: actions/checkout@v2 + - name: Set up Python 3.8 + uses: actions/setup-python@v1 + with: + python-version: 3.8 + + - name: Install Gdal + run: | + sudo apt-get update && sudo apt-get -y 
upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + + - name: Install redis + run: | + sudo apt-get install redis + redis-cli + ping + + - name: Create Databases + run: | + export PGPASSWORD='admin'; + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE underpass;" + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE tm;" + psql -U postgres -h localhost -p 5434 -c "CREATE DATABASE raw;" + + - name: Insert sample db data + run: | + export PGPASSWORD='admin'; + psql -U postgres -h localhost -p 5434 insights < tests/src/fixtures/insights.sql + psql -U postgres -h localhost -p 5434 raw < tests/src/fixtures/raw_data.sql + psql -U postgres -h localhost -p 5434 underpass < tests/src/fixtures/underpass.sql + wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql + psql -U postgres -h localhost -p 5434 tm < tasking-manager.sql + + - name: Install Dependencies + run: | + python -m pip install --upgrade pip + pip install -r requirements.txt + pip install -e . + - name: Creating config.txt + run: | + mv src/config.txt.sample src/config.txt From bc01ff9d61167445989dd9ed244be31d8329fcd7 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 09:30:08 +0545 Subject: [PATCH 075/153] redis minimal installation added --- .github/workflows/build.yml | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index df58e2fb..10db713b 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -34,9 +34,13 @@ jobs: - name: Install redis run: | + sudo apt install lsb-release + curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list + sudo apt-get update sudo apt-get install redis - redis-cli - ping + sudo apt-get install redis + redis-cli ping - name: Create Databases run: | From c952c7555396a156540b5c53bb22cecc0a306351 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 09:38:35 +0545 Subject: [PATCH 076/153] removed double installation of redis --- .github/workflows/build.yml | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 10db713b..f63a82e1 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,7 +28,7 @@ jobs: with: python-version: 3.8 - - name: Install Gdal + - name: Install gdal run: | sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean @@ -39,7 +39,6 @@ jobs: echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list sudo apt-get update sudo apt-get install redis - sudo apt-get install redis redis-cli ping - name: Create Databases @@ -66,3 +65,6 @@ jobs: - name: Creating config.txt run: | mv src/config.txt.sample src/config.txt + - name: Run server + run: | + uvicorn API.main:app From ee806aa08e9ffa883952895e2b2ff1753850aba4 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 09:40:50 +0545 Subject: [PATCH 077/153] moved db section to top --- .github/workflows/build.yml | 26 +++++++++++++------------- 1 file changed, 
13 insertions(+), 13 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index f63a82e1..4973b2ae 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -28,19 +28,6 @@ jobs: with: python-version: 3.8 - - name: Install gdal - run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - - - name: Install redis - run: | - sudo apt install lsb-release - curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg - echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list - sudo apt-get update - sudo apt-get install redis - redis-cli ping - - name: Create Databases run: | export PGPASSWORD='admin'; @@ -57,6 +44,19 @@ jobs: wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql psql -U postgres -h localhost -p 5434 tm < tasking-manager.sql + - name: Install gdal + run: | + sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + + - name: Install redis + run: | + sudo apt install lsb-release + curl -fsSL https://packages.redis.io/gpg | sudo gpg --dearmor -o /usr/share/keyrings/redis-archive-keyring.gpg + echo "deb [signed-by=/usr/share/keyrings/redis-archive-keyring.gpg] https://packages.redis.io/deb $(lsb_release -cs) main" | sudo tee /etc/apt/sources.list.d/redis.list + sudo apt-get update + sudo apt-get install redis + redis-cli ping + - name: Install Dependencies run: | python -m pip install --upgrade pip From 27b46d81d775ca841f678fbec8284f934f6dcf6d Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 10:16:04 +0545 Subject: [PATCH 078/153] check for server error --- .github/workflows/build.yml | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4973b2ae..7841e8ed 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -62,9 +62,7 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt pip install -e . - - name: Creating config.txt - run: | - mv src/config.txt.sample src/config.txt + - name: Run server run: | uvicorn API.main:app From f18c91afbcd0aff246f11f2819f9972f8299a240 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 10:39:15 +0545 Subject: [PATCH 079/153] added timeout --- .github/workflows/build.yml | 19 +++++++++++++++---- 1 file changed, 15 insertions(+), 4 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7841e8ed..d62140cb 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -10,7 +10,8 @@ on: - develop jobs: - deploy: + build: + timeout-minutes: 4 runs-on: ubuntu-latest services: postgres: @@ -62,7 +63,17 @@ jobs: python -m pip install --upgrade pip pip install -r requirements.txt pip install -e . 
- - - name: Run server + - name: Creating config.txt + run: | + mv src/config.txt.sample src/config.txt + - name: Run uvicorn server + run: | + uvicorn API.main:app & + env: + PORT: 8000 + - name: Run Celery server + run: | + celery --app API.api_worker worker --loglevel=INFO & + - name: Test Endpoint run: | - uvicorn API.main:app + curl -I http://127.0.0.1:8000/v1/tasking-manager/teams/ From e07ef5885f31ad2d27f3198b5578c357bbed4f67 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 10:45:14 +0545 Subject: [PATCH 080/153] changed redis url and get api --- .github/workflows/build.yml | 2 +- src/config.txt.sample | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index d62140cb..ec785f0c 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -76,4 +76,4 @@ jobs: celery --app API.api_worker worker --loglevel=INFO & - name: Test Endpoint run: | - curl -I http://127.0.0.1:8000/v1/tasking-manager/teams/ + curl -v http://127.0.0.1:8000/v1/tasking-manager/teams/ diff --git a/src/config.txt.sample b/src/config.txt.sample index c4569c57..7dcdc40d 100644 --- a/src/config.txt.sample +++ b/src/config.txt.sample @@ -38,5 +38,5 @@ secret_key=PutSomethingRandmHere env=dev [CELERY] -CELERY_BROKER_URL=redis://redis:6379/0 -CELERY_RESULT_BACKEND=redis://redis:6379/0 \ No newline at end of file +CELERY_BROKER_URL=redis://localhost:6379/0 +CELERY_RESULT_BACKEND=redis://localhost:6379/0 \ No newline at end of file From a74df8d5aaaa7df5c57dc6c0760aab84bd2a7c9f Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 10:56:48 +0545 Subject: [PATCH 081/153] added flower and mapathon endpoint test --- .github/workflows/build.yml | 8 ++++++-- README.md | 2 +- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ec785f0c..7ef28d11 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -71,9 +71,13 @@ jobs: uvicorn API.main:app & env: PORT: 8000 - - name: Run Celery server + - name: Run celery server run: | celery --app API.api_worker worker --loglevel=INFO & + - name: Run flower dashboard + run: | + celery --app API.api_worker flower --port=5555 --broker=redis://localhost:6379/ + - name: Test Endpoint run: | - curl -v http://127.0.0.1:8000/v1/tasking-manager/teams/ + curl -d '{ "fromTimestamp":"2022-07-22T13:15:00.461", "toTimestamp":"2022-07-22T14:15:00.461", "projectIds":[], "hashtags":[ "missingmaps" ] }' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ diff --git a/README.md b/README.md index 7c81e24c..11a17d6f 100644 --- a/README.md +++ b/README.md @@ -80,7 +80,7 @@ celery --app API.api_worker worker --loglevel=INFO ### 5 . [OPTIONAL] Start flower for monitoring queue -API uses flower for monitoring the Celery distributed queue. Run this command on different shell +API uses flower for monitoring the Celery distributed queue. 
Run this command on different shell , if you are running redis on same machine your broker could be ```redis://localhost:6379/``` ``` celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ From 1e1291ad9cdfcdf72b74b0a17c07aa9111aa4f7c Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 11:02:54 +0545 Subject: [PATCH 082/153] fixed typo --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7ef28d11..bf01392a 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -76,7 +76,7 @@ jobs: celery --app API.api_worker worker --loglevel=INFO & - name: Run flower dashboard run: | - celery --app API.api_worker flower --port=5555 --broker=redis://localhost:6379/ + celery --app API.api_worker flower --port=5555 --broker=redis://localhost:6379/ & - name: Test Endpoint run: | From e01775f7156ffbecf97f871e773a31d4675def7c Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 11:18:39 +0545 Subject: [PATCH 083/153] updated documentation along with curl command --- .github/workflows/build.yml | 2 ++ README.md | 10 +++++++++- docs/CONFIG_DOC.md | 9 ++++++++- src/config.txt.sample | 8 ++++---- 4 files changed, 23 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index bf01392a..2bca2142 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -40,6 +40,7 @@ jobs: run: | export PGPASSWORD='admin'; psql -U postgres -h localhost -p 5434 insights < tests/src/fixtures/insights.sql + psql -U postgres -h localhost -p 5434 insights < tests/src/fixtures/mapathon_summary.sql psql -U postgres -h localhost -p 5434 raw < tests/src/fixtures/raw_data.sql psql -U postgres -h localhost -p 5434 underpass < tests/src/fixtures/underpass.sql wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql @@ -81,3 +82,4 @@ jobs: - name: Test Endpoint run: | curl -d '{ "fromTimestamp":"2022-07-22T13:15:00.461", "toTimestamp":"2022-07-22T14:15:00.461", "projectIds":[], "hashtags":[ "missingmaps" ] }' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ + curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ diff --git a/README.md b/README.md index 11a17d6f..04a61349 100644 --- a/README.md +++ b/README.md @@ -116,7 +116,15 @@ INSERT INTO users_roles VALUES (ID, 1); Repeat the steps to get a new access_token. 
-You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication and database connection +- **Check Mapathon Summary : ** + +``` +curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ +``` +It should return some stats + +- Check Mapathon detailed report : + You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication and database connection ``` {"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 75dd1d87..217b0fa4 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -35,6 +35,12 @@ Setup insights from [here](https://github.com/hotosm/insights) OR Create databas ``` psql -U postgres -h localhost insights < insights.sql ``` +Add a sample data dump for mapathon summary to visualize statistics + +``` +psql -U postgres -h localhost insights < tests/src/fixtures/mapathon_summary.sql +``` + Put your credentials in insights block ``` [INSIGHTS] @@ -111,7 +117,7 @@ secret_key=jnfdsjkfndsjkfnsdkjfnskfn ### 7. Configure celery and redis -Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port , You can change the port according to your configuration for the current docker compose use following +Galaxy API uses [Celery 5](https://docs.celeryq.dev/en/stable/getting-started/first-steps-with-celery.html) and [Redis 6](https://redis.io/download/#redis-stack-downloads) for task queue management , Currently implemented for Rawdata endpoint. 6379 is the default port . if you are running redis on same machine your broker could be ```redis://localhost:6379/```. 
You can change the port according to your configuration for the current docker compose use following ``` [CELERY] @@ -134,6 +140,7 @@ Considering You have PSQL-POSTGIS setup with user **postgres** host **localhost cd tests/src/fixtures/ psql -U postgres -h localhost -p 5432 insights < insights.sql + psql -U postgres -h localhost -p 5432 insights < tests/src/fixtures/mapathon_summary.sql psql -U postgres -h localhost -p 5432 raw < raw_data.sql psql -U postgres -h localhost -p 5432 underpass < underpass.sql wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql diff --git a/src/config.txt.sample b/src/config.txt.sample index 7dcdc40d..ff0b1568 100644 --- a/src/config.txt.sample +++ b/src/config.txt.sample @@ -3,28 +3,28 @@ host=localhost user=postgres password=admin database=insights -port=5432 +port=5434 [UNDERPASS] host=localhost user=postgres password=admin database=underpass -port=5432 +port=5434 [RAW_DATA] host=localhost user=postgres password=admin database=raw -port=5432 +port=5434 [TM] host=localhost user=postgres password=admin database=tm -port=5432 +port=5434 [OAUTH] client_id= From 7096b7fe8cc48ec8e434f77de99623c13649bd79 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 11:24:34 +0545 Subject: [PATCH 084/153] curl command setup --- .github/workflows/build.yml | 1 - 1 file changed, 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 2bca2142..b2e27ee5 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -81,5 +81,4 @@ jobs: - name: Test Endpoint run: | - curl -d '{ "fromTimestamp":"2022-07-22T13:15:00.461", "toTimestamp":"2022-07-22T14:15:00.461", "projectIds":[], "hashtags":[ "missingmaps" ] }' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ From ad9ead29725f4f7700ad8e1571ddf492e0a00458 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 11:34:06 +0545 Subject: [PATCH 085/153] updated readme --- README.md | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 04a61349..58e0da28 100644 --- a/README.md +++ b/README.md @@ -116,12 +116,14 @@ INSERT INTO users_roles VALUES (ID, 1); Repeat the steps to get a new access_token. 
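Before pointing the worker and flower at the broker configured in the `[CELERY]` block, it can help to confirm Redis is actually reachable. A rough sketch using the `redis` package from the requirements is below; the broker URL is an assumption and should match whatever `CELERY_BROKER_URL` is set to (for the docker compose setup that would be `redis://redis:6379/`).

```
# Quick connectivity check for the Celery broker before starting workers/flower.
# The URL below is an assumption -- use the value from your [CELERY] config block.
import redis

broker_url = "redis://localhost:6379/"
client = redis.Redis.from_url(broker_url)
try:
    client.ping()  # raises ConnectionError if the broker is unreachable
    print("Redis broker reachable at", broker_url)
except redis.exceptions.ConnectionError as err:
    print("Cannot reach redis broker:", err)
```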
-- **Check Mapathon Summary : ** +Check endpoints : + +- Check Mapathon Summary : ``` curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ ``` -It should return some stats + It should return some stats - Check Mapathon detailed report : You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication and database connection From 325e38f26232a79f132d00bd48bef80cd5d55a5c Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 11:49:14 +0545 Subject: [PATCH 086/153] updated doc --- docs/CONFIG_DOC.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 217b0fa4..44c61ceb 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -140,7 +140,7 @@ Considering You have PSQL-POSTGIS setup with user **postgres** host **localhost cd tests/src/fixtures/ psql -U postgres -h localhost -p 5432 insights < insights.sql - psql -U postgres -h localhost -p 5432 insights < tests/src/fixtures/mapathon_summary.sql + psql -U postgres -h localhost -p 5432 insights < mapathon_summary.sql psql -U postgres -h localhost -p 5432 raw < raw_data.sql psql -U postgres -h localhost -p 5432 underpass < underpass.sql wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql From a77f4ae172e4700b11bdd3014f0a6ea02b6d8bf6 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 12:16:06 +0545 Subject: [PATCH 087/153] check db connection --- .github/workflows/build.yml | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index b2e27ee5..7b395627 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -78,7 +78,9 @@ jobs: - name: Run flower dashboard run: | celery --app API.api_worker flower --port=5555 --broker=redis://localhost:6379/ & - + - name: Check db connection + run: | + psql -U postgres -h localhost -p 5434 -d insights -c 'select * from osm_changeset limit 1;' - name: Test Endpoint run: | curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ From 1a50d45a17a05ee8df010996888ee70b04da4816 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 12:23:58 +0545 Subject: [PATCH 088/153] check if we can install gdal without update --- .github/workflows/build.yml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 7b395627..a23f92cd 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -48,7 +48,7 @@ jobs: - name: Install gdal run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Install redis run: | From 5b1ffbabab6ddc8f10989a88c06d0a21ef01af90 Mon Sep 17 00:00:00 2001 From: itskshitiz321 
Date: Fri, 23 Sep 2022 12:26:59 +0545 Subject: [PATCH 089/153] check with env --- .github/workflows/build.yml | 7 ++++++- 1 file changed, 6 insertions(+), 1 deletion(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index a23f92cd..ea685522 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -48,7 +48,7 @@ jobs: - name: Install gdal run: | - sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Install redis run: | @@ -81,6 +81,11 @@ jobs: - name: Check db connection run: | psql -U postgres -h localhost -p 5434 -d insights -c 'select * from osm_changeset limit 1;' + env: + # The hostname used to communicate with the PostgreSQL service container + POSTGRES_HOST: postgres + # The default PostgreSQL port + POSTGRES_PORT: 5434 - name: Test Endpoint run: | curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ From 825b2407e8822c9a49dac5dc33c820c8d8488577 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 12:34:55 +0545 Subject: [PATCH 090/153] check with disabled upgrade command --- .github/workflows/build.yml | 8 ++------ 1 file changed, 2 insertions(+), 6 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index ea685522..4947d72d 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -48,7 +48,7 @@ jobs: - name: Install gdal run: | - sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean + sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean - name: Install redis run: | @@ -80,12 +80,8 @@ jobs: celery --app API.api_worker flower --port=5555 --broker=redis://localhost:6379/ & - name: Check db connection run: | + export PGPASSWORD='admin'; psql -U postgres -h localhost -p 5434 -d insights -c 'select * from osm_changeset limit 1;' - env: - # The hostname used to communicate with the PostgreSQL service container - POSTGRES_HOST: postgres - # The default PostgreSQL port - POSTGRES_PORT: 5434 - name: Test Endpoint run: | curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ From 5f49c51da5579ce3c316609464a7dd0462b80200 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 12:44:24 +0545 Subject: [PATCH 091/153] added rawdata snapshot --- .github/workflows/build.yml | 9 ++++----- 1 file changed, 4 insertions(+), 5 deletions(-) diff --git a/.github/workflows/build.yml b/.github/workflows/build.yml index 4947d72d..7ae123ef 100644 --- a/.github/workflows/build.yml +++ b/.github/workflows/build.yml @@ -78,10 +78,9 @@ jobs: - name: Run flower dashboard run: | celery --app API.api_worker flower --port=5555 --broker=redis://localhost:6379/ & - - name: Check db connection - run: | - 
export PGPASSWORD='admin'; - psql -U postgres -h localhost -p 5434 -d insights -c 'select * from osm_changeset limit 1;' - - name: Test Endpoint + - name: Run mapathon summary endpoint run: | curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ + - name: Run rawdata current snapshot + run: | + curl -d '{"geometry":{"type":"Polygon","coordinates":[[[83.96919250488281,28.194446860487773],[83.99751663208006,28.194446860487773],[83.99751663208006,28.214869548073377],[83.96919250488281,28.214869548073377],[83.96919250488281,28.194446860487773]]]}}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v2/raw-data/current-snapshot/ From 82e14cdf1e9035d315676cdb21a59661cae9eb39 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 12:49:35 +0545 Subject: [PATCH 092/153] Update README.md --- README.md | 1 + 1 file changed, 1 insertion(+) diff --git a/README.md b/README.md index 58e0da28..85e30f90 100644 --- a/README.md +++ b/README.md @@ -132,6 +132,7 @@ curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11 {"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} ``` +Clean Setup of API can be found in gihub action workflow , You can follow the steps for more [clarity](https://github.com/hotosm/galaxy-api/actions/workflows/build.yml). #### API has been setup successfully ! From a18b7817e96fb4b410168e2ccc94a5cf85622ec1 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 12:50:59 +0545 Subject: [PATCH 093/153] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index 85e30f90..f0003645 100644 --- a/README.md +++ b/README.md @@ -132,7 +132,7 @@ curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11 {"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} ``` -Clean Setup of API can be found in gihub action workflow , You can follow the steps for more [clarity](https://github.com/hotosm/galaxy-api/actions/workflows/build.yml). +Clean Setup of API can be found in gihub action workflow , You can follow the steps for more [clarity](https://github.com/hotosm/galaxy-api/actions/workflows/build.yml). ```/workflows/build.yml``` #### API has been setup successfully ! 
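The new raw-data snapshot step above can likewise be reproduced from Python for local testing. A minimal sketch, assuming the API is served on 127.0.0.1:8000 and using the same small polygon as the workflow:

```
# Rough Python equivalent of the "Run rawdata current snapshot" CI step.
# Assumes the API is running locally on port 8000; the polygon matches the workflow payload.
import requests

geometry = {
    "type": "Polygon",
    "coordinates": [[
        [83.96919250488281, 28.194446860487773],
        [83.99751663208006, 28.194446860487773],
        [83.99751663208006, 28.214869548073377],
        [83.96919250488281, 28.214869548073377],
        [83.96919250488281, 28.194446860487773],
    ]],
}
resp = requests.post(
    "http://127.0.0.1:8000/v2/raw-data/current-snapshot/",
    json={"geometry": geometry},
    timeout=120,
)
resp.raise_for_status()
print(resp.json())
```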
From e6cba6b5e01b0e000bbf4a91cd20ecc240835586 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 12:53:59 +0545 Subject: [PATCH 094/153] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index f0003645..717071c0 100644 --- a/README.md +++ b/README.md @@ -132,7 +132,7 @@ curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11 {"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} ``` -Clean Setup of API can be found in gihub action workflow , You can follow the steps for more [clarity](https://github.com/hotosm/galaxy-api/actions/workflows/build.yml). ```/workflows/build.yml``` +Clean Setup of API can be found in github action workflow , You can follow the steps for more [clarity](https://github.com/hotosm/galaxy-api/actions/workflows/build.yml). ```/workflows/build.yml``` #### API has been setup successfully ! From b058ee52e3c2616088b1a3ce1f4164f6a8507877 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 13:35:04 +0545 Subject: [PATCH 095/153] Update README.md --- README.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/README.md b/README.md index 717071c0..8cc8e448 100644 --- a/README.md +++ b/README.md @@ -5,14 +5,14 @@ ## Getting Started API Can be installed through docker or manually to local machine . -To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.com/hotosm/galaxy-api/blob/develop/docs/GETTING_STARTED_WITH_DOCKER.md) +To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.com/hotosm/galaxy-api/blob/develop/docs/GETTING_STARTED_WITH_DOCKER.md) md file is in /docs ### 1. Install requirements. - Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu ``` -sudo apt-get update && sudo apt-get -y upgrade && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean +sudo apt-get update && sudo apt-get -y install gdal-bin python3-gdal && sudo apt-get -y autoremove && sudo apt-get clean ``` - Install [redis](https://redis.io/docs/getting-started/installation/) on your system From 12b2f9060d0acb148bc3d5f69d4faa3781b4df8a Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 13:41:54 +0545 Subject: [PATCH 096/153] Update README.md --- README.md | 6 ------ 1 file changed, 6 deletions(-) diff --git a/README.md b/README.md index 8cc8e448..ec5bba0b 100644 --- a/README.md +++ b/README.md @@ -53,12 +53,6 @@ cd galaxy-api pip install -r requirements.txt ``` -Install gdal python ( Include your gdal version , if you are using different version ) - -``` -pip install gdal==3.0.2 -``` - ### 2. Setup required config for API Make sure you have https://www.postgresql.org/ setup in your machine. 
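Since the pip-installed GDAL binding is dropped here and only the system `gdal-bin` package remains, a quick way to confirm the install is usable by the raw-data export path (which relies on the `ogr2ogr` binary) is sketched below. This is a standalone check, not part of the API code.

```
# Sanity check that the ogr2ogr binary from gdal-bin is available on PATH,
# since raw-data exports shell out to it. A small standalone sketch.
import shutil
import subprocess

ogr2ogr = shutil.which("ogr2ogr")
if ogr2ogr is None:
    raise SystemExit("ogr2ogr not found on PATH -- install gdal-bin first")

# Print the installed GDAL version for the record.
result = subprocess.run([ogr2ogr, "--version"], capture_output=True, text=True, check=True)
print(result.stdout.strip())
```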
From 79f948299826d3c3248efa072823f3c6659af43a Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 15:35:10 +0545 Subject: [PATCH 097/153] Changed URL with relative and fixed typo --- docs/GETTING_STARTED_WITH_DOCKER.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 6843f171..928abe85 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -4,7 +4,7 @@ git clone https://github.com/hotosm/galaxy-api.git ``` -Follow [instructions](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) and create config.txt inside /src/ +Follow [instructions](../docs/CONFIG_DOC.md) and create config.txt inside /src/ ### 2. Create the images and spin up the Docker containers: ``` @@ -22,11 +22,11 @@ API Docs will be displayed like this upon uvicorn successfull server start ![image](https://user-images.githubusercontent.com/36752999/191813795-fdfd46fe-5e6c-4ecf-be9b-f9f351d3d1d7.png) ``` -http://127.0.0.1:8000/5550/ +http://127.0.0.1:5550/ ``` Flower dashboard will look like this on successfull installation with a worker online ![image](https://user-images.githubusercontent.com/36752999/191813613-3859522b-ea68-4370-87b2-ebd1d8880d80.png) -Now, Continue Readme. Check installation from [here](https://github.com/hotosm/galaxy-api/blob/feature/celery/README.md#check-api-installation) +Now, Continue Readme. Check installation from [here](../README.md#check-api-installation) From 7141f6cec04ae8873cccb6f76c9dc005cf6a5389 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 15:38:11 +0545 Subject: [PATCH 098/153] changed url to relative url --- README.md | 7 +++---- 1 file changed, 3 insertions(+), 4 deletions(-) diff --git a/README.md b/README.md index ec5bba0b..53e49484 100644 --- a/README.md +++ b/README.md @@ -5,8 +5,7 @@ ## Getting Started API Can be installed through docker or manually to local machine . -To get started with docker follow [GETTING_STARTED_WITH_DOCKER](https://github.com/hotosm/galaxy-api/blob/develop/docs/GETTING_STARTED_WITH_DOCKER.md) md file is in /docs - +To get started with docker follow [GETTING_STARTED_WITH_DOCKER](/docs/GETTING_STARTED_WITH_DOCKER.md) ### 1. Install requirements. - Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu @@ -57,7 +56,7 @@ pip install -r requirements.txt Make sure you have https://www.postgresql.org/ setup in your machine. -Setup necessary config for API from [docs/CONFIG.DOC](https://github.com/hotosm/galaxy-api/blob/develop/docs/CONFIG_DOC.md) +Setup necessary config for API from [docs/CONFIG.DOC](/docs/CONFIG_DOC.md) ### 3. Run server @@ -126,7 +125,7 @@ curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11 {"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} ``` -Clean Setup of API can be found in github action workflow , You can follow the steps for more [clarity](https://github.com/hotosm/galaxy-api/actions/workflows/build.yml). ```/workflows/build.yml``` +Clean Setup of API can be found in github action workflow , You can follow the steps for more [clarity](/.github/workflows/build.yml). ```/workflows/build.yml``` #### API has been setup successfully ! 
From ab0c94a76c8c8f684f4e71afd055c1de20fbd087 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 17:51:00 +0545 Subject: [PATCH 099/153] resolved healthcheck url --- Dockerfile | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/Dockerfile b/Dockerfile index 73cdbc08..7f059c1e 100644 --- a/Dockerfile +++ b/Dockerfile @@ -16,4 +16,4 @@ RUN pip install -e . COPY /src/config.txt src/config.txt -HEALTHCHECK CMD curl -f http://localhost:8000 || exit 1 +HEALTHCHECK CMD curl -f http://localhost:8000/latest/docs || exit 1 From 8ccf55dcb6dc2563868a197fa6d05e07d99888b6 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 18:08:46 +0545 Subject: [PATCH 100/153] added note for docker users to use local postgres from container --- docs/CONFIG_DOC.md | 2 ++ 1 file changed, 2 insertions(+) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 44c61ceb..02c4d768 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,6 +1,8 @@ Before getting started on config Make sure you have [Postgres](https://www.postgresql.org/) and [Postgis](https://postgis.net/) setup in your machine. +**Note** : If you are running API through Docker container , Your local postgres should be accessible from containers . In order to do that find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file + ## Compulsary Configuration ### 1. Create ```config.txt``` inside src directory. From 13b8a2531645adbe2f8f33dc75bdb4ed74ca8e21 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 23 Sep 2022 18:17:27 +0545 Subject: [PATCH 101/153] added supporting doc if connection fails from container to psql --- docs/CONFIG_DOC.md | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 02c4d768..60886a3e 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,7 +1,8 @@ Before getting started on config Make sure you have [Postgres](https://www.postgresql.org/) and [Postgis](https://postgis.net/) setup in your machine. -**Note** : If you are running API through Docker container , Your local postgres should be accessible from containers . In order to do that find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file +**Note** : If you are running API through Docker container , Your local postgres should be accessible from containers . In order to do that find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file . +If connection still fails : You may need to edit your postgres config file ( ask postgres where it is by this query ````show config_file;```) and edit ```listen_addresses = '*'``` inside ```postgresql.conf``` . 
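When following the note above about reaching a local PostgreSQL instance from a container, it can save time to probe the host/port that will go into `config.txt` before starting the API. A small sketch using `psycopg2` (already a project dependency); the host IP and credentials below are placeholders taken from the sample configuration, not fixed values.

```
# Verify the database is reachable with the host/port intended for config.txt
# (e.g. your machine's network IP when the API runs inside a container).
# Host and credentials are placeholders -- adjust to your setup.
import psycopg2
from psycopg2 import OperationalError

try:
    conn = psycopg2.connect(
        host="192.168.1.10",   # assumption: replace with your network IP or localhost
        port=5432,
        dbname="insights",
        user="postgres",
        password="admin",
        connect_timeout=5,
    )
    conn.close()
    print("PostgreSQL is reachable with these settings")
except OperationalError as err:
    print("Connection failed:", err)
```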
Also add ```host all all 0.0.0.0/0 trust``` in ```pg_hba.conf``` ## Compulsary Configuration From 2d7ce77e035f615efd7417529296f9c4aa2b188a Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 23 Sep 2022 18:20:10 +0545 Subject: [PATCH 102/153] formatted md file --- docs/CONFIG_DOC.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 60886a3e..b580c6fe 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -2,7 +2,7 @@ Before getting started on config Make sure you have [Postgres](https://www.postgresql.org/) and [Postgis](https://postgis.net/) setup in your machine. **Note** : If you are running API through Docker container , Your local postgres should be accessible from containers . In order to do that find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file . -If connection still fails : You may need to edit your postgres config file ( ask postgres where it is by this query ````show config_file;```) and edit ```listen_addresses = '*'``` inside ```postgresql.conf``` . Also add ```host all all 0.0.0.0/0 trust``` in ```pg_hba.conf``` +If connection still fails : You may need to edit your postgres config file ( ask postgres where it is by this query ```show config_file;``` ) and edit/enable ```listen_addresses = '*'``` inside ```postgresql.conf``` . Also add ```host all all 0.0.0.0/0 trust``` in ```pg_hba.conf``` ## Compulsary Configuration From 2b1c0b1d8ab46f7b6e0e959117d3e30661943058 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Sat, 24 Sep 2022 11:08:45 +0545 Subject: [PATCH 103/153] resolved docker cache on dependencies and updated options to connect outside container --- Dockerfile | 9 ++++++--- docs/CONFIG_DOC.md | 2 -- docs/GETTING_STARTED_WITH_DOCKER.md | 22 ++++++++++++++++++++-- 3 files changed, 26 insertions(+), 7 deletions(-) diff --git a/Dockerfile b/Dockerfile index 7f059c1e..fb09fdd4 100644 --- a/Dockerfile +++ b/Dockerfile @@ -6,14 +6,17 @@ RUN apt-get update && apt-get -y upgrade && \ apt-get -y autoremove && \ apt-get clean -COPY . /app +RUN mkdir /app +COPY requirements.docker.txt /app/requirements.docker.txt +COPY setup.py /app/setup.py WORKDIR /app RUN pip install --upgrade pip RUN pip install -r requirements.docker.txt -RUN pip install -e . -COPY /src/config.txt src/config.txt +COPY . /app + +RUN pip install -e . HEALTHCHECK CMD curl -f http://localhost:8000/latest/docs || exit 1 diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index b580c6fe..294b35a8 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -1,8 +1,6 @@ Before getting started on config Make sure you have [Postgres](https://www.postgresql.org/) and [Postgis](https://postgis.net/) setup in your machine. -**Note** : If you are running API through Docker container , Your local postgres should be accessible from containers . In order to do that find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file . -If connection still fails : You may need to edit your postgres config file ( ask postgres where it is by this query ```show config_file;``` ) and edit/enable ```listen_addresses = '*'``` inside ```postgresql.conf``` . 
Also add ```host all all 0.0.0.0/0 trust``` in ```pg_hba.conf``` ## Compulsary Configuration diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 928abe85..d923c659 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -6,6 +6,7 @@ git clone https://github.com/hotosm/galaxy-api.git Follow [instructions](../docs/CONFIG_DOC.md) and create config.txt inside /src/ + ### 2. Create the images and spin up the Docker containers: ``` docker-compose up -d --build @@ -18,15 +19,32 @@ Uvicorn should be running on 8000 port , Redis on default port , Celery with a w ``` http://127.0.0.1:8000/latest/docs ``` -API Docs will be displayed like this upon uvicorn successfull server start +API Docs will be displayed like this upon uvicorn successfull server start ![image](https://user-images.githubusercontent.com/36752999/191813795-fdfd46fe-5e6c-4ecf-be9b-f9f351d3d1d7.png) ``` http://127.0.0.1:5550/ ``` -Flower dashboard will look like this on successfull installation with a worker online +Flower dashboard will look like this on successfull installation with a worker online ![image](https://user-images.githubusercontent.com/36752999/191813613-3859522b-ea68-4370-87b2-ebd1d8880d80.png) Now, Continue Readme. Check installation from [here](../README.md#check-api-installation) + +### [Troubleshoot] If you can't connect to local postgres from API + +Since API is running through container, If you have local postgres installed on your machine that port may not be accesible as localhost from container , Container needs to connect to your local network , In order to do that there are few options +1. Option one : + + - For windows/ Mac docker user + Replace localhost with ```host.docker.internal``` – This resolves to the outside host and lets you connect to your machine's localhost through container , For example if postgres is running on your machine in 5432 , container can connect from ```host.docker.internal:5432``` + - For linux user : + Linux users can enable host.docker.internal too via the --add-host flag for docker run. Start your containers with this flag to expose the host string: + ```docker run -d --add-host host.docker.internal:host-gateway my-container:latest``` + +2. Option two : + + Find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file . + + If connection still fails : You may need to edit your postgres config file ( ask postgres where it is by this query ```show config_file;``` ) and edit/enable ```listen_addresses = '*'``` inside ```postgresql.conf``` . 
Also add ```host all all 0.0.0.0/0 trust``` in ```pg_hba.conf``` From b44e4abd728ba458dcf9a8c34540d45e069c1a94 Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Sat, 24 Sep 2022 06:28:21 +0000 Subject: [PATCH 104/153] added pickle and status --- API/api_worker.py | 2 +- API/raw_data.py | 7 +------ 2 files changed, 2 insertions(+), 7 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 8d6cb128..2fe9135c 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -22,7 +22,7 @@ celery.conf.task_serializer = 'pickle' celery.conf.result_serializer = 'pickle' -celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] +celery.conf.accept_content = ['application/json','pickle', 'application/x-python-serialize'] @celery.task(bind=True, name="process_raw_data") diff --git a/API/raw_data.py b/API/raw_data.py index 35e18814..c83fe4ba 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -536,10 +536,5 @@ def get_task_status(task_id): """ task_result = AsyncResult(task_id, app=celery) - - result = { - "id": task_id, - "status": task_result.status, - "result": task_result.result if task_result.status == 'SUCCESS' else None - } + result = { "id": task_id,"status": task_result.state, "result": task_result.result if task_result.status == 'SUCCESS' else None } return JSONResponse(result) From 8bcbd725027381a5ddf17c97b2fde53e21ddd5b1 Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Sat, 24 Sep 2022 06:36:39 +0000 Subject: [PATCH 105/153] round digit to 2 decimal for binded file size --- API/raw_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API/raw_data.py b/API/raw_data.py index c83fe4ba..c7844f2a 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -286,7 +286,7 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks logging.info( f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") - return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000)} MB""", "zip_file_size_bytes": {zip_file_size}} + return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000,2)} MB""", "zip_file_size_bytes": {zip_file_size}} @router.get("/status/") From f349755d36f2a0be22b02b868ea4dbec71e14631 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Sat, 24 Sep 2022 13:35:34 +0545 Subject: [PATCH 106/153] Introduces rate limit --- API/api_worker.py | 2 +- API/main.py | 10 +++++++++- API/raw_data.py | 6 +++--- docs/CONFIG_DOC.md | 1 + requirements.docker.txt | 4 ++-- requirements.txt | 4 ++-- src/galaxy/config.py | 4 ++++ 7 files changed, 22 insertions(+), 9 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 2fe9135c..8d6cb128 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -22,7 +22,7 @@ celery.conf.task_serializer = 'pickle' celery.conf.result_serializer = 'pickle' -celery.conf.accept_content = ['application/json','pickle', 'application/x-python-serialize'] +celery.conf.accept_content = ['application/json', 'application/x-python-serialize'] @celery.task(bind=True, name="process_raw_data") diff --git a/API/main.py b/API/main.py index 3935356c..19e2d9b2 100644 --- a/API/main.py +++ b/API/main.py @@ -37,8 +37,11 @@ # from .test_router import router as test_router from .status import 
router as status_router from src.galaxy.db_session import database_instance -from src.galaxy.config import use_connection_pooling, use_s3_to_upload, logger as logging, config +from src.galaxy.config import limiter, use_connection_pooling, use_s3_to_upload, logger as logging, config from fastapi_versioning import VersionedFastAPI +from slowapi import _rate_limit_exceeded_handler +from slowapi.errors import RateLimitExceeded + # only use sentry if it is specified in config blocks if config.get("SENTRY", "dsn", fallback=None): @@ -56,6 +59,8 @@ import os os.environ['OAUTHLIB_INSECURE_TRANSPORT'] = '1' + + app = FastAPI(title="Galaxy API") # app.include_router(test_router) @@ -81,6 +86,9 @@ version_format='{major}', prefix_format='/v{major}') +app.state.limiter = limiter +app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) + origins = ["*"] diff --git a/API/raw_data.py b/API/raw_data.py index c7844f2a..23caf36b 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -40,8 +40,7 @@ from src.galaxy.app import RawData, S3FileTransfer from celery.result import AsyncResult from .api_worker import process_raw_data, celery - -from src.galaxy.config import use_s3_to_upload, logger as logging, config +from src.galaxy.config import export_rate_limit, use_s3_to_upload, logger as logging, config, limiter router = APIRouter(prefix="/raw-data") @@ -335,6 +334,7 @@ def watch_s3_upload(url: str, path: str) -> None: @router.post("/current-snapshot/") +@limiter.limit(f"{export_rate_limit}/minute") @version(2) def get_current_snapshot_of_osm_data( params: RawDataCurrentParams, request: Request @@ -536,5 +536,5 @@ def get_task_status(task_id): """ task_result = AsyncResult(task_id, app=celery) - result = { "id": task_id,"status": task_result.state, "result": task_result.result if task_result.status == 'SUCCESS' else None } + result = { "id": task_id, "status": task_result.state, "result": task_result.result if task_result.status == 'SUCCESS' else None } return JSONResponse(result) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 294b35a8..c7d8f978 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -213,6 +213,7 @@ use_connection_pooling=True # default it will not use connection pooling but you log_level=info #options are info,debug,warning,error env=dev # default is dev , supported values are dev and prod shp_limit=6000 # in mb default is 4096 +export_rate_limit=5 # no of requests per minute - default is 5 requests per minute ``` Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block diff --git a/requirements.docker.txt b/requirements.docker.txt index 1bf94ede..6adae62f 100644 --- a/requirements.docker.txt +++ b/requirements.docker.txt @@ -30,5 +30,5 @@ fastapi-versioning==0.10.0 redis==4.3.4 celery==5.2.7 flower==1.2.0 -#gdal and ogr2ogr is required on the machine to run rawdata endpoint -# gdal == 3.3.2 +slowapi==0.1.6 + diff --git a/requirements.txt b/requirements.txt index 1bf94ede..6adae62f 100644 --- a/requirements.txt +++ b/requirements.txt @@ -30,5 +30,5 @@ fastapi-versioning==0.10.0 redis==4.3.4 celery==5.2.7 flower==1.2.0 -#gdal and ogr2ogr is required on the machine to run rawdata endpoint -# gdal == 3.3.2 +slowapi==0.1.6 + diff --git a/src/galaxy/config.py b/src/galaxy/config.py index db80d7cd..5af0b598 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -22,6 +22,8 @@ from configparser import ConfigParser import logging import os +from slowapi.util import get_remote_address +from slowapi import Limiter 
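For readers unfamiliar with slowapi, the wiring introduced in this rate-limit patch can be boiled down to a toy example. This is only a stripped-down sketch: the `/ping` route and the `5/minute` limit string are illustrative, not part of the project.

```
# Stripped-down sketch of the slowapi wiring used above, on a toy route.
from fastapi import FastAPI, Request
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address

limiter = Limiter(key_func=get_remote_address)  # rate limit keyed on the caller's IP
app = FastAPI()
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)


@app.get("/ping")
@limiter.limit("5/minute")          # requests beyond this return HTTP 429
def ping(request: Request):         # slowapi needs the Request argument to resolve the key
    return {"status": "ok"}
```

The same pattern scales to the export endpoint by reading the per-minute value from config, which is what the `export_rate_limit` fallback in this patch does.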
CONFIG_FILE_PATH = "src/config.txt" @@ -31,6 +33,8 @@ config = ConfigParser() config.read(CONFIG_FILE_PATH) +limiter = Limiter(key_func=get_remote_address) # rate limiter for API requests +export_rate_limit = int(config.get("API_CONFIG", "export_rate_limit", fallback=5)) # get log level from config log_level = config.get("API_CONFIG", "log_level", fallback=None) use_s3_to_upload = False From c9b9b7b948f6ee857bb9ca378780b0ec2886daff Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Sat, 24 Sep 2022 15:51:35 +0545 Subject: [PATCH 107/153] resolved mapathon detail docs --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 53e49484..6e1e4813 100644 --- a/README.md +++ b/README.md @@ -5,7 +5,7 @@ ## Getting Started API Can be installed through docker or manually to local machine . -To get started with docker follow [GETTING_STARTED_WITH_DOCKER](/docs/GETTING_STARTED_WITH_DOCKER.md) +To get started with docker follow [GETTING_STARTED_WITH_DOCKER](/docs/GETTING_STARTED_WITH_DOCKER.md) ### 1. Install requirements. - Install [gdal](https://gdal.org/index.html) on your machine , for example on Ubuntu @@ -119,10 +119,10 @@ curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11 It should return some stats - Check Mapathon detailed report : - You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication and database connection + You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication , database connection and visualize the above summary result ``` -{"fromTimestamp":"2019-04-08 10:00:00.000000","toTimestamp":"2019-04-08 11:00:00.000000","projectIds":[1],"hashtags":[]} +{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]} ``` Clean Setup of API can be found in github action workflow , You can follow the steps for more [clarity](/.github/workflows/build.yml). 
```/workflows/build.yml``` From fd286845f88b772d7e77dc76dfa5f0501be1d124 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Sat, 24 Sep 2022 16:04:06 +0545 Subject: [PATCH 108/153] resolved raise exception if config file not found --- src/galaxy/config.py | 4 +++- 1 file changed, 3 insertions(+), 1 deletion(-) diff --git a/src/galaxy/config.py b/src/galaxy/config.py index 5af0b598..88db1fd1 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -24,11 +24,13 @@ import os from slowapi.util import get_remote_address from slowapi import Limiter +import errno +import os CONFIG_FILE_PATH = "src/config.txt" if os.path.exists(CONFIG_FILE_PATH) is False: - raise "Config file does not exist : src/config.txt" + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), 'src/config.txt') config = ConfigParser() config.read(CONFIG_FILE_PATH) From 0116b0c3e2035d2ee8f29785f2d4ae1a7babbe10 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Sat, 24 Sep 2022 16:07:02 +0545 Subject: [PATCH 109/153] removed string in error file --- src/galaxy/config.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/galaxy/config.py b/src/galaxy/config.py index 88db1fd1..2c28519e 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -30,7 +30,7 @@ CONFIG_FILE_PATH = "src/config.txt" if os.path.exists(CONFIG_FILE_PATH) is False: - raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), 'src/config.txt') + raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), CONFIG_FILE_PATH) config = ConfigParser() config.read(CONFIG_FILE_PATH) From b55ff6b4a1ec96bda94178f592f292652cbac679 Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Sun, 25 Sep 2022 15:52:26 +0000 Subject: [PATCH 110/153] reverted lgic with previous --- API/api_worker.py | 67 ++++++++++++++++++++++++++++++----------------- 1 file changed, 43 insertions(+), 24 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 8d6cb128..964dee1d 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -2,6 +2,8 @@ import pathlib import orjson import shutil +import time +import requests from datetime import datetime as dt import zipfile from celery import Celery @@ -29,9 +31,7 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): try: start_time = dt.now() - if ( - params.output_type is None - ): # if no ouput type is supplied default is geojson output + if params.output_type is None: # if no ouput type is supplied default is geojson output params.output_type = RawDataOutputType.GEOJSON.value # unique id for zip file and geojson for each export @@ -47,15 +47,17 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): logging.info("Request %s received", exportname) - dump_temp_file, geom_area, root_dir_file = RawData(params).extract_current_data( - exportname - ) + dump_temp_file, geom_area, root_dir_file = RawData( + params).extract_current_data(exportname) path = f"""{root_dir_file}{exportname}/""" if os.path.exists(path) is False: - return JSONResponse(status_code=400, content={"Error": "Request went too big"}) + return JSONResponse( + status_code=400, + content={"Error": "Request went too big"} + ) - logging.debug("Zip Binding Started !") + logging.debug('Zip Binding Started !') # saving file in temp directory instead of memory so that zipping file will not eat memory zip_temp_path = f"""{root_dir_file}{exportname}.zip""" zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) @@ -69,7 +71,7 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): 
orjson.dumps(dict(params.geometry))) zf.close() - logging.debug("Zip Binding Done !") + logging.debug('Zip Binding Done !') inside_file_size = 0 for temp_file in dump_temp_file: # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore @@ -83,14 +85,13 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): if use_s3_to_upload: file_transfer_obj = S3FileTransfer() download_url = file_transfer_obj.upload(zip_temp_path, exportname) + # watches the status code of the link provided and deletes the file if it is 200 + watch_s3_upload(download_url, zip_temp_path) else: # getting from config in case api and frontend is not hosted on same url client_host = config.get( - "API_CONFIG", - "api_host", - fallback=f"""{incoming_scheme}://{incoming_host}""", - ) + "API_CONFIG", "api_host", fallback=f"""{incoming_scheme}://{incoming_host}""") client_port = config.get("API_CONFIG", "api_port", fallback=8000) if client_port: @@ -103,17 +104,9 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): response_time = dt.now() - start_time response_time_str = str(response_time) logging.info( - f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}" - ) - - return { - "download_url": download_url, - "file_name": exportname, - "response_time": response_time_str, - "query_area": f"{geom_area} Sq Km ", - "binded_file_size": f"{round(inside_file_size/1000000)} MB", - "zip_file_size_bytes": zip_file_size, - } + f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") + + return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000,2)} MB""", "zip_file_size_bytes": {zip_file_size}} except Exception as ex: raise ex @@ -124,3 +117,29 @@ def remove_file(path: str) -> None: shutil.rmtree(path) except OSError as ex: logging.error("Error: %s - %s.", ex.filename, ex.strerror) + +def watch_s3_upload(url: str, path: str) -> None: + """Watches upload of s3 either it is completed or not and removes the temp file after completion + + Args: + url (_type_): url generated by the script where data will be available + path (_type_): path where temp file is located at + """ + start_time = time.time() + remove_temp_file = True + check_call = requests.head(url).status_code + if check_call != 200: + logging.debug("Upload is not done yet waiting ...") + while check_call != 200: # check until status is not green + check_call = requests.head(url).status_code + if time.time() - start_time > 300: + logging.error( + "Upload time took more than 5 min , Killing watch : %s , URL : %s", path, url) + remove_temp_file = False # don't remove the file if upload fails + break + time.sleep(3) # check each 3 second + # once it is verfied file is uploaded finally remove the file + if remove_temp_file: + logging.debug( + "File is uploaded at %s , flushing out from %s", url, path) + os.unlink(path) \ No newline at end of file From a9a0c985eb861fa8154fbddb83403e60a6aa665d Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Mon, 26 Sep 2022 09:06:20 +0545 Subject: [PATCH 111/153] Reformatted boto exceptions --- API/api_worker.py | 11 +++++------ API/raw_data.py | 2 +- src/galaxy/app.py | 16 ++++++++++------ src/galaxy/config.py | 7 ++++++- 4 files changed, 22 insertions(+), 14 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 
964dee1d..ac110a6c 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -80,15 +80,11 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): # remove the file that are just binded to zip file , we no longer need to store it remove_file(path) - # check if download url will be generated from s3 or not from config if use_s3_to_upload: file_transfer_obj = S3FileTransfer() download_url = file_transfer_obj.upload(zip_temp_path, exportname) - # watches the status code of the link provided and deletes the file if it is 200 - watch_s3_upload(download_url, zip_temp_path) else: - # getting from config in case api and frontend is not hosted on same url client_host = config.get( "API_CONFIG", "api_host", fallback=f"""{incoming_scheme}://{incoming_host}""") @@ -101,12 +97,14 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): # getting file size of zip , units are in bytes converted to mb in response zip_file_size = os.path.getsize(zip_temp_path) + # watches the status code of the link provided and deletes the file if it is 200 + watch_s3_upload(download_url, zip_temp_path) response_time = dt.now() - start_time response_time_str = str(response_time) logging.info( f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") + return {"download_url": download_url, "file_name": exportname, "process_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000,2)} MB""", "zip_file_size_bytes": zip_file_size} - return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000,2)} MB""", "zip_file_size_bytes": {zip_file_size}} except Exception as ex: raise ex @@ -118,6 +116,7 @@ def remove_file(path: str) -> None: except OSError as ex: logging.error("Error: %s - %s.", ex.filename, ex.strerror) + def watch_s3_upload(url: str, path: str) -> None: """Watches upload of s3 either it is completed or not and removes the temp file after completion @@ -142,4 +141,4 @@ def watch_s3_upload(url: str, path: str) -> None: if remove_temp_file: logging.debug( "File is uploaded at %s , flushing out from %s", url, path) - os.unlink(path) \ No newline at end of file + os.unlink(path) diff --git a/API/raw_data.py b/API/raw_data.py index 23caf36b..7f634c6f 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -337,7 +337,7 @@ def watch_s3_upload(url: str, path: str) -> None: @limiter.limit(f"{export_rate_limit}/minute") @version(2) def get_current_snapshot_of_osm_data( - params: RawDataCurrentParams, request: Request + params: RawDataCurrentParams, background_tasks: BackgroundTasks, request: Request ): """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features diff --git a/src/galaxy/app.py b/src/galaxy/app.py index fb09751e..8c0cb94c 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -20,13 +20,13 @@ import os import sys import threading -from .config import get_db_connection_params, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME, level, logger as logging, export_path, use_connection_pooling, shp_limit -from .validation.models import Source +from src.galaxy.config import get_db_connection_params, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME, level, logger as logging, export_path, use_connection_pooling, shp_limit +from src.galaxy.validation.models import Source 
from psycopg2 import connect, sql from psycopg2.extras import DictCursor from psycopg2 import OperationalError -from .validation.models import UserRole, TeamMemberFunction, List, RawDataCurrentParams, RawDataOutputType, MapathonRequestParams, MappedFeature, MapathonSummary, MappedFeatureWithUser, MapathonContributor, MappedTaskStats, ValidatedTaskStats, TimeSpentMapping, OrganizationHashtagParams, DataRecencyParams, OrganizationHashtag, Trainings, TrainingParams, TrainingOrganisations, User, TimeSpentValidating, TMUserStats, MapathonDetail, UserStatistics, DataQualityHashtagParams, DataQuality_TM_RequestParams, DataQuality_username_RequestParams -from .query_builder.builder import generate_list_teams_metadata, get_grid_id_query, raw_currentdata_extraction_query, check_last_updated_rawdata, extract_geometry_type_query, raw_historical_data_extraction_query, generate_tm_teams_list, generate_tm_validators_stats_query, create_user_time_spent_mapping_and_validating_query, create_user_tasks_mapped_and_validated_query, generate_organization_hashtag_reports, check_last_updated_user_data_quality_underpass, create_changeset_query, create_osm_history_query, create_users_contributions_query, check_last_updated_osm_insights, generate_data_quality_TM_query, generate_data_quality_hashtag_reports, generate_data_quality_username_query, check_last_updated_mapathon_insights, check_last_updated_user_statistics_insights, check_last_updated_osm_underpass, generate_mapathon_summary_underpass_query, generate_training_organisations_query, generate_filter_training_query, generate_training_query, create_UserStats_get_statistics_query, create_userstats_get_statistics_with_hashtags_query +from src.galaxy.validation.models import UserRole, TeamMemberFunction, List, RawDataCurrentParams, RawDataOutputType, MapathonRequestParams, MappedFeature, MapathonSummary, MappedFeatureWithUser, MapathonContributor, MappedTaskStats, ValidatedTaskStats, TimeSpentMapping, OrganizationHashtagParams, DataRecencyParams, OrganizationHashtag, Trainings, TrainingParams, TrainingOrganisations, User, TimeSpentValidating, TMUserStats, MapathonDetail, UserStatistics, DataQualityHashtagParams, DataQuality_TM_RequestParams, DataQuality_username_RequestParams +from src.galaxy.query_builder.builder import generate_list_teams_metadata, get_grid_id_query, raw_currentdata_extraction_query, check_last_updated_rawdata, extract_geometry_type_query, raw_historical_data_extraction_query, generate_tm_teams_list, generate_tm_validators_stats_query, create_user_time_spent_mapping_and_validating_query, create_user_tasks_mapped_and_validated_query, generate_organization_hashtag_reports, check_last_updated_user_data_quality_underpass, create_changeset_query, create_osm_history_query, create_users_contributions_query, check_last_updated_osm_insights, generate_data_quality_TM_query, generate_data_quality_hashtag_reports, generate_data_quality_username_query, check_last_updated_mapathon_insights, check_last_updated_user_statistics_insights, check_last_updated_osm_underpass, generate_mapathon_summary_underpass_query, generate_training_organisations_query, generate_filter_training_query, generate_training_query, create_UserStats_get_statistics_query, create_userstats_get_statistics_with_hashtags_query import json import pandas from json import loads as json_loads @@ -1005,6 +1005,7 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, @staticmethod def query2geojson(con, extraction_query, dump_temp_file_path): """Function written from 
scratch without being dependent on any library, Provides better performance for geojson binding""" + logging.debug("I am inside query2geojson and my query is %s", extraction_query) # creating geojson file pre_geojson = """{"type": "FeatureCollection","features": [""" post_geojson = """]}""" @@ -1138,6 +1139,7 @@ def extract_current_data(self, exportname): _file_path_: geojson file location path """ # first check either geometry needs grid or not for querying + logging.debug("I am inside extract current data function") grid_id, geometry_dump, geom_area = RawData.get_grid_id( self.params.geometry, self.cur) if self.params.output_type is None: @@ -1156,12 +1158,14 @@ def extract_current_data(self, exportname): os.makedirs(path) # create file path with respect to of output type dump_temp_file_path = f"""{path}{exportname}.{output_type.lower()}""" + logging.debug("after dump temp file path with %s and %s", output_type, RawDataOutputType.GEOJSON.value) try: # currently we have only geojson binding function written other than that we have depend on ogr - if output_type is RawDataOutputType.GEOJSON.value: + if output_type == RawDataOutputType.GEOJSON.value: + logging.debug("above firing geojson") RawData.query2geojson(self.con, raw_currentdata_extraction_query( self.params, g_id=grid_id, geometry_dump=geometry_dump), dump_temp_file_path) # uses own conversion class - elif output_type is RawDataOutputType.SHAPEFILE.value: + elif output_type == RawDataOutputType.SHAPEFILE.value: point_query, line_query, poly_query, point_schema, line_schema, poly_schema = extract_geometry_type_query( self.params, ogr_export=True) # point_query, line_query, poly_query, point_schema, line_schema, poly_schema = extract_geometry_type_query( diff --git a/src/galaxy/config.py b/src/galaxy/config.py index 2c28519e..2013e62c 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -57,8 +57,13 @@ # logging.getLogger("fiona").propagate = False # disable fiona logging logging.basicConfig(format='%(asctime)s - %(message)s', level=level) logging.getLogger('boto3').propagate = False # disable boto3 logging +logging.getLogger('botocore').propagate = False # disable boto3 logging +logging.getLogger('s3transfer').propagate = False # disable boto3 logging +logging.getLogger('boto').propagate = False # disable boto3 logging -logger = logging.getLogger('galaxy') + + +logger = logging.getLogger('src.galaxy') export_path = config.get('API_CONFIG', 'export_path', fallback=None) if export_path is None: From b3ba9e44ac7f102dc2dc779f38e459ab0148e70b Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Mon, 26 Sep 2022 03:29:16 +0000 Subject: [PATCH 112/153] ignore all log files --- .gitignore | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/.gitignore b/.gitignore index 2fd08af4..8af24106 100644 --- a/.gitignore +++ b/.gitignore @@ -12,4 +12,5 @@ build newrelic.ini newrelic.ini_backup exports -*.out \ No newline at end of file +*.out +*.log From e1ab356919832fefccd8fb3d1d678bd452fc6349 Mon Sep 17 00:00:00 2001 From: itskshtiiz321 Date: Mon, 26 Sep 2022 04:18:12 +0000 Subject: [PATCH 113/153] disabled logs --- src/galaxy/app.py | 4 ---- 1 file changed, 4 deletions(-) diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 8c0cb94c..dce99674 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -1005,7 +1005,6 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, @staticmethod def query2geojson(con, extraction_query, dump_temp_file_path): """Function written from scratch without being 
dependent on any library, Provides better performance for geojson binding""" - logging.debug("I am inside query2geojson and my query is %s", extraction_query) # creating geojson file pre_geojson = """{"type": "FeatureCollection","features": [""" post_geojson = """]}""" @@ -1139,7 +1138,6 @@ def extract_current_data(self, exportname): _file_path_: geojson file location path """ # first check either geometry needs grid or not for querying - logging.debug("I am inside extract current data function") grid_id, geometry_dump, geom_area = RawData.get_grid_id( self.params.geometry, self.cur) if self.params.output_type is None: @@ -1158,11 +1156,9 @@ def extract_current_data(self, exportname): os.makedirs(path) # create file path with respect to of output type dump_temp_file_path = f"""{path}{exportname}.{output_type.lower()}""" - logging.debug("after dump temp file path with %s and %s", output_type, RawDataOutputType.GEOJSON.value) try: # currently we have only geojson binding function written other than that we have depend on ogr if output_type == RawDataOutputType.GEOJSON.value: - logging.debug("above firing geojson") RawData.query2geojson(self.con, raw_currentdata_extraction_query( self.params, g_id=grid_id, geometry_dump=geometry_dump), dump_temp_file_path) # uses own conversion class elif output_type == RawDataOutputType.SHAPEFILE.value: From f2db19f893c2517a004b4fd2f21b5ea6ba359405 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Tue, 27 Sep 2022 11:39:54 +0545 Subject: [PATCH 114/153] reformatted --- src/galaxy/app.py | 85 +++-------------------------------------------- 1 file changed, 5 insertions(+), 80 deletions(-) diff --git a/src/galaxy/app.py b/src/galaxy/app.py index dce99674..1c021a87 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -925,16 +925,16 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, if query: formatted_query = query.replace('"', '\\"') # for mbtiles we need additional input as well i.e. 
minzoom and maxzoom , setting default at max=22 and min=10 - if outputtype is RawDataOutputType.MBTILES.value: + if outputtype == RawDataOutputType.MBTILES.value: cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" -dsco MINZOOM=10 -dsco MAXZOOM=22 {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( outputtype=outputtype, export_path=export_temp_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) - elif outputtype is RawDataOutputType.SHAPEFILE.value: + elif outputtype == RawDataOutputType.SHAPEFILE.value: # if it is shapefile it needs different logic for point,line and polygon file_paths = [] outputtype = "ESRI Shapefile" if point_query: - query_path = f"""{dump_temp_file_path}_sql.sql""" + query_path = f"""{dump_temp_file_path}_point.sql""" # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(query_path, 'w') as file: @@ -956,7 +956,7 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, file_paths.append(f"""{dump_temp_file_path}_point.dbf""") file_paths.append(f"""{dump_temp_file_path}_point.prj""") if line_query: - query_path = f"""{dump_temp_file_path}_sql.sql""" + query_path = f"""{dump_temp_file_path}_line.sql""" # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(query_path, 'w') as file: @@ -978,7 +978,7 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, if poly_query: poly_file_path = f"""{dump_temp_file_path}_poly.shp""" - poly_query_path = f"""{dump_temp_file_path}_poly_sql.sql""" + poly_query_path = f"""{dump_temp_file_path}_poly.sql""" # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(poly_query_path, 'w') as file: @@ -1029,81 +1029,6 @@ def query2geojson(con, extraction_query, dump_temp_file_path): f.write(post_geojson) logging.debug("Server side Query Result Post Processing Done") - # @staticmethod - # def query2shapefile(con, point_query, line_query, poly_query, point_schema, line_schema, poly_schema, dump_temp_file_path): - # """Function that transfer db query to shp""" - # # schema: it is a simple dictionary with geometry and properties as keys - # # schema = {'geometry': 'LineString','properties': {'test': 'int'}} - # file_paths = [] - # if point_query: - # logging.debug("Writing Point Shapefile") - - # schema = {'geometry': 'Point', 'properties': point_schema, } - # point_file_path = f"""{dump_temp_file_path}_point.shp""" - # # open a fiona object - # pointShp = fiona.open(point_file_path, mode='w', driver='ESRI Shapefile', encoding='UTF-8', - # schema=schema, crs="EPSG:4326") - - # with con.cursor(name='fetch_raw') as cursor: # using server side cursor - # cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side - # cursor.execute(point_query) - # for row in cursor: - # pointShp.write(orjson.loads(row[0])) - - # cursor.close() # closing connection to avoid memory issues - # # close fiona object - # pointShp.close() - # file_paths.append(point_file_path) - # file_paths.append(f"""{dump_temp_file_path}_point.shx""") - # file_paths.append(f"""{dump_temp_file_path}_point.cpg""") - # file_paths.append(f"""{dump_temp_file_path}_point.dbf""") - # file_paths.append(f"""{dump_temp_file_path}_point.prj""") - - # if line_query: - # 
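The hunks above also finish replacing `is` with `==` wherever the requested output type is compared against `RawDataOutputType.*.value`. That is the right fix: `is` checks object identity, and a string arriving from a request body is generally a different object from the enum's stored value, so the identity check can silently fail even when the text matches. A small illustration (CPython behaviour, not code from the repository):

```
from enum import Enum

class RawDataOutputType(Enum):
    GEOJSON = "GeoJSON"

requested = "".join(["Geo", "JSON"])                  # built at runtime, a distinct str object
print(requested is RawDataOutputType.GEOJSON.value)   # False - identity comparison
print(requested == RawDataOutputType.GEOJSON.value)   # True  - value comparison
```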
logging.debug("Writing Line Shapefile") - - # schema = {'geometry': 'LineString', 'properties': line_schema, } - # # print(schema) - # line_file_path = f"""{dump_temp_file_path}_line.shp""" - # with fiona.open(line_file_path, 'w', encoding='UTF-8', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: - # with con.cursor(name='fetch_raw') as cursor: # using server side cursor - # cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side - # cursor.execute(line_query) - # for row in cursor: - # layer.write(orjson.loads(row[0])) - - # cursor.close() # closing connection to avoid memory issues - # # close fiona object - # layer.close() - # file_paths.append(line_file_path) - # file_paths.append(f"""{dump_temp_file_path}_line.shx""") - # file_paths.append(f"""{dump_temp_file_path}_line.cpg""") - # file_paths.append(f"""{dump_temp_file_path}_line.dbf""") - # file_paths.append(f"""{dump_temp_file_path}_line.prj""") - - # if poly_query: - # logging.debug("Writing Poly Shapefile") - - # poly_file_path = f"""{dump_temp_file_path}_poly.shp""" - # schema = {'geometry': 'Polygon', 'properties': poly_schema, } - - # with fiona.open(poly_file_path, 'w', encoding='UTF-8', crs=from_epsg(4326), driver='ESRI Shapefile', schema=schema) as layer: - # with con.cursor(name='fetch_raw') as cursor: # using server side cursor - # cursor.itersize = 1000 # chunk size to get 1000 row at a time in client side - # cursor.execute(poly_query) - # for row in cursor: - # layer.write(orjson.loads(row[0])) - - # cursor.close() # closing connection to avoid memory issues - # # close fiona object - # layer.close() - # file_paths.append(poly_file_path) - # file_paths.append(f"""{dump_temp_file_path}_poly.shx""") - # file_paths.append(f"""{dump_temp_file_path}_poly.cpg""") - # file_paths.append(f"""{dump_temp_file_path}_poly.dbf""") - # file_paths.append(f"""{dump_temp_file_path}_poly.prj""") - # return file_paths - @staticmethod def get_grid_id(geom, cur): """Gets the intersecting related grid id for the geometry that is passed From 40e939414c0542440e6414ab3fdc45cad11b467a Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Tue, 27 Sep 2022 14:25:12 +0545 Subject: [PATCH 115/153] changed worker to dev debug setup and added fix for ogr2ogr devsetup --- docker-compose.yml | 2 +- src/galaxy/app.py | 19 +++++++++++-------- 2 files changed, 12 insertions(+), 9 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index c9c79d90..2e4a4e58 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -14,7 +14,7 @@ services: worker: build: . 
- command: celery --app API.api_worker worker --loglevel=INFO + command: celery --app API.api_worker worker --loglevel=DEBUG volumes: - .:/app depends_on: diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 1c021a87..8271e10e 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -939,11 +939,13 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(query_path, 'w') as file: file.write(point_query) + with open('exports/test.sql', 'w') as file: + file.write(point_query) # standard file path for the generation point_file_path = f"""{dump_temp_file_path}_point.shp""" # command for ogr2ogr to generate file - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=point_file_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + outputtype=outputtype, export_path=point_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) logging.debug("Calling ogr2ogr-Point Shapefile") run_ogr2ogr_cmd(cmd, binding_file_dir) # clear query file we don't need it anymore @@ -963,8 +965,8 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, file.write(line_query) line_file_path = f"""{dump_temp_file_path}_line.shp""" - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=line_file_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + outputtype=outputtype, export_path=line_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) logging.debug("Calling ogr2ogr-Line Shapefile") run_ogr2ogr_cmd(cmd, binding_file_dir) # clear query file we don't need it anymore @@ -983,8 +985,8 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(poly_query_path, 'w') as file: file.write(poly_query) - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=poly_file_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=poly_query_path) + cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} 
password={password}" -sql @"{pg_sql_select}" -progress'''.format( + outputtype=outputtype, export_path=poly_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=poly_query_path) logging.debug("Calling ogr2ogr-Poly Shapefile") run_ogr2ogr_cmd(cmd, binding_file_dir) # clear query file we don't need it anymore @@ -997,8 +999,8 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, return file_paths else: # if it is not shapefile use standard ogr2ogr with their output format , will be useful for kml - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=export_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) + cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( + outputtype=outputtype, export_path=export_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) run_ogr2ogr_cmd(cmd, binding_file_dir) return export_path @@ -1131,6 +1133,7 @@ def run_ogr2ogr_cmd(cmd, binding_file_dir): """ try: # start_time=time.time() + logging.debug("Calling command : %s",cmd) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, From 276b527ebb31dc4af4ddc7a06bf48c0afb2c19aa Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Tue, 27 Sep 2022 14:26:23 +0545 Subject: [PATCH 116/153] removed test sql --- src/galaxy/app.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 8271e10e..e2a202b6 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -939,8 +939,6 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(query_path, 'w') as file: file.write(point_query) - with open('exports/test.sql', 'w') as file: - file.write(point_query) # standard file path for the generation point_file_path = f"""{dump_temp_file_path}_point.shp""" # command for ogr2ogr to generate file From 8f12d73f7ea3cfe10bf7dc52453cdcaa53270490 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Wed, 28 Sep 2022 21:42:32 +0545 Subject: [PATCH 117/153] added geoflatbuff file system --- API/api_worker.py | 46 +++++++++++++++------------- src/galaxy/app.py | 54 +++++++++++++++++++-------------- src/galaxy/validation/models.py | 3 +- 3 files changed, 59 insertions(+), 44 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index ac110a6c..85615576 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -31,6 +31,9 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): try: start_time = dt.now() + bind_zip=True + if params.output_type == RawDataOutputType.FlatGeobuf.value: + bind_zip=False if params.output_type is None: # if no ouput type is supplied default is geojson output params.output_type = RawDataOutputType.GEOJSON.value @@ -56,34 +59,35 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): status_code=400, content={"Error": "Request went too 
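The ogr2ogr hunks above thread the previously missing `port=` into every PostgreSQL connection string and log the assembled command before running it. A rough, self-contained sketch of that pattern; the connection values and file names here are placeholders, not the project's configuration:

```
import subprocess

db = {"host": "localhost", "port": 5432, "user": "postgres",
      "database": "raw", "password": "secret"}        # hypothetical connection values

cmd = (
    'ogr2ogr -overwrite -f GeoJSON export.geojson '
    'PG:"host={host} port={port} user={user} dbname={database} password={password}" '
    '-sql @query.sql -progress'                        # @file: read the SELECT from a .sql file
).format(**db)

print("Calling command : %s" % cmd)
# The worker runs it through the shell because of the quoted PG connection string, e.g.:
# subprocess.check_output(cmd, stderr=subprocess.STDOUT, shell=True, timeout=2 * 60 * 60)
```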
big"} ) - - logging.debug('Zip Binding Started !') - # saving file in temp directory instead of memory so that zipping file will not eat memory - zip_temp_path = f"""{root_dir_file}{exportname}.zip""" - zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) - - directory = pathlib.Path(path) - for file_path in directory.iterdir(): - zf.write(file_path, arcname=file_path.name) - - # Compressing geojson file - zf.writestr("clipping_boundary.geojson", - orjson.dumps(dict(params.geometry))) - - zf.close() - logging.debug('Zip Binding Done !') + if bind_zip: + logging.debug('Zip Binding Started !') + # saving file in temp directory instead of memory so that zipping file will not eat memory + zip_temp_path = f"""{root_dir_file}{exportname}.zip""" + zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) + + directory = pathlib.Path(path) + for file_path in directory.iterdir(): + zf.write(file_path, arcname=file_path.name) + + # Compressing geojson file + zf.writestr("clipping_boundary.geojson", + orjson.dumps(dict(params.geometry))) + + zf.close() + logging.debug('Zip Binding Done !') + else: + zip_temp_path = dump_temp_file[0] inside_file_size = 0 for temp_file in dump_temp_file: - # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore if os.path.exists(temp_file): inside_file_size += os.path.getsize(temp_file) - - # remove the file that are just binded to zip file , we no longer need to store it - remove_file(path) + if bind_zip: + # remove the file that are just binded to zip file , we no longer need to store it + remove_file(path) # check if download url will be generated from s3 or not from config if use_s3_to_upload: file_transfer_obj = S3FileTransfer() - download_url = file_transfer_obj.upload(zip_temp_path, exportname) + download_url = file_transfer_obj.upload(zip_temp_path, exportname, bind_zip=False if bind_zip else True) else: # getting from config in case api and frontend is not hosted on same url client_host = config.get( diff --git a/src/galaxy/app.py b/src/galaxy/app.py index e2a202b6..d6ae83fe 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -995,11 +995,18 @@ def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, file_paths.append(f"""{dump_temp_file_path}_poly.dbf""") file_paths.append(f"""{dump_temp_file_path}_poly.prj""") return file_paths + + elif outputtype == RawDataOutputType.FlatGeobuf.value: + cmd = '''ogr2ogr -overwrite -f FlatGeobuf {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( + export_path=export_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) + run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=False) + return export_path + else: # if it is not shapefile use standard ogr2ogr with their output format , will be useful for kml cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=export_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) - run_ogr2ogr_cmd(cmd, binding_file_dir) + outputtype=outputtype, export_path=export_temp_path, host=db_items.get('host'), 
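When `bind_zip` is true the worker archives everything in the working directory and adds the request geometry as `clipping_boundary.geojson`; FlatGeobuf exports skip the archive so the single `.fgb` file can be delivered as-is. A compact sketch of that zip step with the standard library (paths and geometry are made up, and `json` stands in for `orjson`):

```
import json
import pathlib
import zipfile

working_dir = pathlib.Path("exports/demo_export")          # hypothetical export working dir
working_dir.mkdir(parents=True, exist_ok=True)
(working_dir / "Export.geojson").write_text('{"type": "FeatureCollection", "features": []}')

zip_path = working_dir.parent / "demo_export.zip"
with zipfile.ZipFile(zip_path, "w", zipfile.ZIP_DEFLATED) as zf:
    for file_path in working_dir.iterdir():                # every file the export produced
        zf.write(file_path, arcname=file_path.name)
    # ship the clipping geometry with the data so users can see what was extracted
    zf.writestr("clipping_boundary.geojson",
                json.dumps({"type": "Polygon", "coordinates": []}))
print("bundled", zip_path)
```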
port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) + run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=False) return export_path @staticmethod @@ -1118,7 +1125,7 @@ def check_status(self): return str(behind_time[0][0]) -def run_ogr2ogr_cmd(cmd, binding_file_dir): +def run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=True): """Runs command and monitors the file size until the process runs Args: @@ -1131,7 +1138,7 @@ def run_ogr2ogr_cmd(cmd, binding_file_dir): """ try: # start_time=time.time() - logging.debug("Calling command : %s",cmd) + logging.debug("Calling command : %s", cmd) process = subprocess.Popen( cmd, stdout=subprocess.PIPE, @@ -1139,22 +1146,22 @@ def run_ogr2ogr_cmd(cmd, binding_file_dir): shell=True, preexec_fn=os.setsid ) - while process.poll() is None: - # if (time.time()-start_time)/60 > 25 : - # raise ValueError("Shapefile Exceed Limit export") - - size = 0 - for ele in os.scandir(binding_file_dir): - size += os.path.getsize(ele) - # print(size/1000000) # in MB - if size / 1000000 > shp_limit: - logging.warn( - f"Killing ogr2ogr because it exceed {shp_limit} MB...") - # process.kill() - # os.killpg(os.getpgid(process.pid), signal.SIGTERM) # Send the signal to all the process groups - # shutil.rmtree(binding_file_dir) - raise HTTPException( - status_code=404, detail=f"Shapefile Exceed {shp_limit} MB Limit") + if use_limit: + while process.poll() is None: + # if (time.time()-start_time)/60 > 25 : + # raise ValueError("Shapefile Exceed Limit export") + size = 0 + for ele in os.scandir(binding_file_dir): + size += os.path.getsize(ele) + # print(size/1000000) # in MB + if size / 1000000 > shp_limit: + logging.warn( + f"Killing ogr2ogr because it exceed {shp_limit} MB...") + # process.kill() + # os.killpg(os.getpgid(process.pid), signal.SIGTERM) # Send the signal to all the process groups + # shutil.rmtree(binding_file_dir) + raise HTTPException( + status_code=404, detail=f"Limit Exceed {shp_limit} MB Limit") logging.debug(process.stdout.read()) except Exception as ex: @@ -1201,13 +1208,16 @@ def get_bucket_location(self, bucket_name): raise ex return bucket_location or 'us-east-1' - def upload(self, file_path, file_prefix): + def upload(self, file_path, file_prefix, bind_zip=True): """Used for transferring file to s3 after reading path from the user , It will wait for the upload to complete Parameters :file_path --- your local file path to upload , file_prefix -- prefix for the filename which is stored sample function call : S3FileTransfer.transfer(file_path="exports",file_prefix="upload_test") """ - file_name = f"{file_prefix}.zip" + if bind_zip: + file_name = f"{file_prefix}.zip" + else: + file_name = file_prefix # instantiate upload start_time = time.time() diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index 90c1e50b..4b806aae 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -477,8 +477,9 @@ class TeamMemberFunction(Enum): class RawDataOutputType (Enum): GEOJSON = "GeoJSON" - KML = "KML" + KML = "kml" SHAPEFILE = "shp" + FlatGeobuf = "fgb" MBTILES = "MBTILES" # fully experimental for now From 80deb65d5f1d8c1d9f7a2433e4c1ad9e6caa9d5d Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Wed, 28 Sep 2022 22:38:18 +0545 Subject: [PATCH 118/153] added . 
output type in file name --- API/api_worker.py | 2 +- src/galaxy/app.py | 11 ++++------- 2 files changed, 5 insertions(+), 8 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 85615576..ff0c8524 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -87,7 +87,7 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): # check if download url will be generated from s3 or not from config if use_s3_to_upload: file_transfer_obj = S3FileTransfer() - download_url = file_transfer_obj.upload(zip_temp_path, exportname, bind_zip=False if bind_zip else True) + download_url = file_transfer_obj.upload(zip_temp_path, exportname, file_suffix='zip' if bind_zip else params.output_type.lower()) else: # getting from config in case api and frontend is not hosted on same url client_host = config.get( diff --git a/src/galaxy/app.py b/src/galaxy/app.py index d6ae83fe..9cc8c7d2 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -1133,7 +1133,7 @@ def run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=True): binding_file_dir (_type_): _description_ Raises: - ValueError: Shapefile exceed 4GB limit + ValueError: File exceed 4GB limit ValueError: Binding failed """ try: @@ -1208,16 +1208,13 @@ def get_bucket_location(self, bucket_name): raise ex return bucket_location or 'us-east-1' - def upload(self, file_path, file_prefix, bind_zip=True): + def upload(self, file_path, file_name, file_suffix='zip'): """Used for transferring file to s3 after reading path from the user , It will wait for the upload to complete Parameters :file_path --- your local file path to upload , file_prefix -- prefix for the filename which is stored sample function call : S3FileTransfer.transfer(file_path="exports",file_prefix="upload_test") """ - if bind_zip: - file_name = f"{file_prefix}.zip" - else: - file_name = file_prefix + file_name = f"{file_name}.{file_suffix}" # instantiate upload start_time = time.time() @@ -1232,7 +1229,7 @@ def upload(self, file_path, file_prefix, bind_zip=True): logging.error(ex) raise ex logging.debug("Uploaded %s in %s sec", - file_prefix, time.time() - start_time) + file_name, time.time() - start_time) # generate the download url bucket_location = self.get_bucket_location(bucket_name=BUCKET_NAME) object_url = f"""https://s3.{bucket_location}.amazonaws.com/{BUCKET_NAME}/{file_name}""" From eddbbe40003ce984b176d915c4a7f17637cbce2f Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 15:23:12 +0545 Subject: [PATCH 119/153] formatted code , used path option for paths improved workflow for rawdata --- API/api_worker.py | 71 ++++------ API/download_export.py | 45 ------- API/main.py | 16 ++- API/raw_data.py | 52 +------- API/tasks.py | 46 +++++++ API/test_router.py | 16 --- docs/CONFIG_DOC.md | 2 +- src/galaxy/app.py | 221 +++++++++++++------------------- src/galaxy/config.py | 7 +- src/galaxy/validation/models.py | 2 +- 10 files changed, 177 insertions(+), 301 deletions(-) delete mode 100644 API/download_export.py create mode 100644 API/tasks.py delete mode 100644 API/test_router.py diff --git a/API/api_worker.py b/API/api_worker.py index ff0c8524..7c6e1f5f 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -28,46 +28,28 @@ @celery.task(bind=True, name="process_raw_data") -def process_raw_data(self, incoming_scheme, incoming_host, params): +def process_raw_data(self, params): try: start_time = dt.now() bind_zip=True if params.output_type == RawDataOutputType.FlatGeobuf.value: bind_zip=False - if params.output_type is None: # if no ouput type is supplied 
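`S3FileTransfer.upload` now takes a `file_suffix` so that unzipped results (for example a single FlatGeobuf file) keep their own extension, and the public link is assembled from the bucket's region. A hedged sketch of that flow with boto3; the bucket name and paths are placeholders and the real class also wires in credentials from config:

```
import boto3

BUCKET_NAME = "my-exports-bucket"                          # placeholder bucket

def upload(file_path: str, file_name: str, file_suffix: str = "zip") -> str:
    """Upload one export artifact and return its public object URL."""
    key = f"{file_name}.{file_suffix}"
    s3 = boto3.client("s3")
    s3.upload_file(file_path, BUCKET_NAME, key)            # blocks until the transfer finishes
    region = s3.get_bucket_location(Bucket=BUCKET_NAME)["LocationConstraint"] or "us-east-1"
    return f"https://s3.{region}.amazonaws.com/{BUCKET_NAME}/{key}"

# upload("exports/demo_export.zip", "demo_export")         # -> https://s3.../demo_export.zip
```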
default is geojson output - params.output_type = RawDataOutputType.GEOJSON.value - # unique id for zip file and geojson for each export - if params.file_name: - # need to format string from space to _ because it is filename , may be we need to filter special character as well later on - formatted_file_name = format_file_name_str(params.file_name) - # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(self.request.id)}" - exportname = f"""{formatted_file_name}_{str(self.request.id)}_{params.output_type}""" # disabled date for now - - else: - # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(self.request.id)}" - exportname = f"Raw_Export_{str(self.request.id)}_{params.output_type}" + exportname = f"{format_file_name_str(params.file_name) if params.file_name else 'Export'}_{str(self.request.id)}_{params.output_type}" logging.info("Request %s received", exportname) - dump_temp_file, geom_area, root_dir_file = RawData( - params).extract_current_data(exportname) - path = f"""{root_dir_file}{exportname}/""" - - if os.path.exists(path) is False: - return JSONResponse( - status_code=400, - content={"Error": "Request went too big"} - ) + geom_area, working_dir = RawData(params).extract_current_data(exportname) + inside_file_size = 0 if bind_zip: logging.debug('Zip Binding Started !') # saving file in temp directory instead of memory so that zipping file will not eat memory - zip_temp_path = f"""{root_dir_file}{exportname}.zip""" - zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) + upload_file_path = os.path.join(working_dir,os.pardir,f"{exportname}.zip") - directory = pathlib.Path(path) - for file_path in directory.iterdir(): + zf = zipfile.ZipFile(upload_file_path, "w", zipfile.ZIP_DEFLATED) + for file_path in pathlib.Path(working_dir).iterdir(): zf.write(file_path, arcname=file_path.name) + inside_file_size += os.path.getsize(file_path) # Compressing geojson file zf.writestr("clipping_boundary.geojson", @@ -76,38 +58,29 @@ def process_raw_data(self, incoming_scheme, incoming_host, params): zf.close() logging.debug('Zip Binding Done !') else: - zip_temp_path = dump_temp_file[0] - inside_file_size = 0 - for temp_file in dump_temp_file: - if os.path.exists(temp_file): - inside_file_size += os.path.getsize(temp_file) - if bind_zip: - # remove the file that are just binded to zip file , we no longer need to store it - remove_file(path) + for file_path in pathlib.Path(working_dir).iterdir(): + upload_file_path=file_path + inside_file_size += os.path.getsize(file_path) + break # only take one file inside dir , if contains many it should be inside zip # check if download url will be generated from s3 or not from config if use_s3_to_upload: file_transfer_obj = S3FileTransfer() - download_url = file_transfer_obj.upload(zip_temp_path, exportname, file_suffix='zip' if bind_zip else params.output_type.lower()) + download_url = file_transfer_obj.upload(upload_file_path, exportname, file_suffix='zip' if bind_zip else params.output_type.lower()) else: - # getting from config in case api and frontend is not hosted on same url - client_host = config.get( - "API_CONFIG", "api_host", fallback=f"""{incoming_scheme}://{incoming_host}""") - client_port = config.get("API_CONFIG", "api_port", fallback=8000) - - if client_port: - download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download 
the file and deliver the response , we want to reduce waiting time and free function ! - else: - download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + download_url = str(upload_file_path) # give the static file download url back to user served from fastapi static export path # getting file size of zip , units are in bytes converted to mb in response - zip_file_size = os.path.getsize(zip_temp_path) + zip_file_size = os.path.getsize(upload_file_path) # watches the status code of the link provided and deletes the file if it is 200 - watch_s3_upload(download_url, zip_temp_path) + if use_s3_to_upload: + watch_s3_upload(download_url, upload_file_path) + if use_s3_to_upload or bind_zip: + #remove working dir from the machine , if its inside zip / uploaded we no longer need it + remove_file(working_dir) response_time = dt.now() - start_time response_time_str = str(response_time) - logging.info( - f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") - return {"download_url": download_url, "file_name": exportname, "process_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000,2)} MB""", "zip_file_size_bytes": zip_file_size} + logging.info(f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") + return {"download_url": download_url, "file_name": exportname, "process_time": response_time_str, "query_area": f"{geom_area} Sq Km ", "binded_file_size": f"{round(inside_file_size/1000000,2)} MB", "zip_file_size_bytes": zip_file_size} except Exception as ex: raise ex diff --git a/API/download_export.py b/API/download_export.py deleted file mode 100644 index d8645c63..00000000 --- a/API/download_export.py +++ /dev/null @@ -1,45 +0,0 @@ -# Copyright (C) 2021 Humanitarian OpenStreetmap Team - -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU Affero General Public License as -# published by the Free Software Foundation, either version 3 of the -# License, or (at your option) any later version. - -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU Affero General Public License for more details. - -# You should have received a copy of the GNU Affero General Public License -# along with this program. If not, see . - -# Humanitarian OpenStreetmap Team -# 1100 13th Street NW Suite 800 Washington, D.C. 20005 -# - -"""[Router Responsible for downloading exports ] -""" -from fastapi import APIRouter -from fastapi_versioning import version -from src.galaxy.config import export_path -from fastapi.responses import FileResponse -from os.path import exists - - -router = APIRouter(prefix="") - - -@router.get("/exports/{file_name}") -@version(1) -def download_export(file_name: str): - """Used for Delivering our export to user. 
- Returns zip file if it is present on our server if not returns error - """ - zip_temp_path = f"""{export_path}{file_name}""" - if exists(zip_temp_path): - response = FileResponse(zip_temp_path, media_type="application/zip") - response.headers["Content-Disposition"] = f"attachment; filename={file_name}" - return response - else: - raise ValueError( - "File Doesn't Exist or has been cleared up from system") diff --git a/API/main.py b/API/main.py index 19e2d9b2..4e398b60 100644 --- a/API/main.py +++ b/API/main.py @@ -33,15 +33,14 @@ from .hashtag_stats import router as hashtag_router from .tasking_manager import router as tm_router from .raw_data import router as raw_data_router -from .download_export import router as download_router -# from .test_router import router as test_router +from .tasks import router as tasks_router from .status import router as status_router from src.galaxy.db_session import database_instance -from src.galaxy.config import limiter, use_connection_pooling, use_s3_to_upload, logger as logging, config +from src.galaxy.config import limiter, export_path, use_connection_pooling, use_s3_to_upload, logger as logging, config from fastapi_versioning import VersionedFastAPI from slowapi import _rate_limit_exceeded_handler from slowapi.errors import RateLimitExceeded - +from fastapi.staticfiles import StaticFiles # only use sentry if it is specified in config blocks if config.get("SENTRY", "dsn", fallback=None): @@ -53,6 +52,7 @@ traces_sample_rate=config.get("SENTRY", "rate") ) + run_env = config.get("API_CONFIG", "env", fallback='prod') if run_env.lower() == 'dev': # This is used for local setup for auth login @@ -76,15 +76,17 @@ app.include_router(tm_router) app.include_router(status_router) app.include_router(raw_data_router) +app.include_router(tasks_router) + -if use_s3_to_upload is False: - # only mount the disk if config is set to disk - app.include_router(download_router) app = VersionedFastAPI(app, enable_latest=True, version_format='{major}', prefix_format='/v{major}') +if use_s3_to_upload is False: + # only mount the disk if config is set to disk + app.mount("/exports", StaticFiles(directory=export_path), name="exports") app.state.limiter = limiter app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler) diff --git a/API/raw_data.py b/API/raw_data.py index 7f634c6f..c4e318fa 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -38,8 +38,7 @@ from src.galaxy.query_builder.builder import format_file_name_str from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType from src.galaxy.app import RawData, S3FileTransfer -from celery.result import AsyncResult -from .api_worker import process_raw_data, celery +from .api_worker import process_raw_data from src.galaxy.config import export_rate_limit, use_s3_to_upload, logger as logging, config, limiter router = APIRouter(prefix="/raw-data") @@ -337,8 +336,7 @@ def watch_s3_upload(url: str, path: str) -> None: @limiter.limit(f"{export_rate_limit}/minute") @version(2) def get_current_snapshot_of_osm_data( - params: RawDataCurrentParams, background_tasks: BackgroundTasks, request: Request -): + params: RawDataCurrentParams, request: Request): """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features Steps to Run Snapshot : @@ -346,9 +344,9 @@ def get_current_snapshot_of_osm_data( 1. 
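With the dedicated download router deleted, local (non-S3) deployments serve finished exports straight from disk through a static mount. A minimal sketch of that wiring; the flag and directory would come from config in the real API:

```
import os
from fastapi import FastAPI
from fastapi.staticfiles import StaticFiles

export_path = "exports"                     # same fallback the config change introduces
use_s3_to_upload = False                    # set from config; the mount only applies locally
os.makedirs(export_path, exist_ok=True)     # config.py now creates the directory if missing

app = FastAPI()
if not use_s3_to_upload:
    # anything the worker writes under exports/ becomes reachable at /exports/<file name>
    app.mount("/exports", StaticFiles(directory=export_path), name="exports")
```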
Post the your request here and your request will be on queue, endpoint will return as following : { "task_id": "your task_id", - "track_link": "/current-snapshot/tasks/task_id/" + "track_link": "/tasks/task_id/" } - 2. Now navigate to /current-snapshot/tasks/ with your task id to track progress and result + 2. Now navigate to /tasks/ with your task id to track progress and result Args: @@ -498,43 +496,5 @@ def get_current_snapshot_of_osm_data( """ # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login - task = process_raw_data.delay(request.url.scheme, request.client.host, params) - return JSONResponse({"task_id": task.id, "track_link": f"/current-snapshot/tasks/{task.id}/"}) - - -@router.get("/current-snapshot/tasks/{task_id}/") -@version(2) -def get_task_status(task_id): - """Tracks the request from the task id provided by galaxy api for the request - - Args: - - task_id ([type]): [Unique id provided on response from /current-snapshot/] - - Returns: - - id: Id of the task - status : SUCCESS / PENDING - result : Result of task - - Successful task will have additional nested json inside row as following : - Example response of rawdata current snapshot response : - - - { - "id": "3fded368-456f-4ef4-a1b8-c099a7f77ca4", - "status": "SUCCESS", - "result": { - "download_url": "https://s3.us-east-1.amazonaws.com/exports-stage.hotosm.org/Raw_Export_3fded368-456f-4ef4-a1b8-c099a7f77ca4_GeoJSON.zip", - "file_name": "Raw_Export_3fded368-456f-4ef4-a1b8-c099a7f77ca4_GeoJSON", - "response_time": "0:00:12.175976", - "query_area": "6 Sq Km ", - "binded_file_size": "7 MB", - "zip_file_size_bytes": 1331601 - - } - - """ - task_result = AsyncResult(task_id, app=celery) - result = { "id": task_id, "status": task_result.state, "result": task_result.result if task_result.status == 'SUCCESS' else None } - return JSONResponse(result) + task = process_raw_data.delay(params) + return JSONResponse({"task_id": task.id, "track_link": f"/tasks/status/{task.id}/"}) \ No newline at end of file diff --git a/API/tasks.py b/API/tasks.py new file mode 100644 index 00000000..04c87cbc --- /dev/null +++ b/API/tasks.py @@ -0,0 +1,46 @@ +from celery.result import AsyncResult +from .api_worker import celery +from fastapi import APIRouter +from fastapi_versioning import version +from fastapi.responses import JSONResponse + + +router = APIRouter(prefix="/tasks") + + +@router.get("/status/{task_id}/") +@version(2) +def get_task_status(task_id): + """Tracks the request from the task id provided by galaxy api for the request + + Args: + + task_id ([type]): [Unique id provided on response from /current-snapshot/] + + Returns: + + id: Id of the task + status : SUCCESS / PENDING + result : Result of task + + Successful task will have additional nested json inside row as following : + Example response of rawdata current snapshot response : + + + { + "id": "3fded368-456f-4ef4-a1b8-c099a7f77ca4", + "status": "SUCCESS", + "result": { + "download_url": "https://s3.us-east-1.amazonaws.com/exports-stage.hotosm.org/Raw_Export_3fded368-456f-4ef4-a1b8-c099a7f77ca4_GeoJSON.zip", + "file_name": "Raw_Export_3fded368-456f-4ef4-a1b8-c099a7f77ca4_GeoJSON", + "response_time": "0:00:12.175976", + "query_area": "6 Sq Km ", + "binded_file_size": "7 MB", + "zip_file_size_bytes": 1331601 + + } + + """ + task_result = AsyncResult(task_id, app=celery) + result = { "id": task_id, "status": task_result.state, "result": task_result.result if 
task_result.status == 'SUCCESS' else None } + return JSONResponse(result) \ No newline at end of file diff --git a/API/test_router.py b/API/test_router.py deleted file mode 100644 index 95badfda..00000000 --- a/API/test_router.py +++ /dev/null @@ -1,16 +0,0 @@ -from fastapi import APIRouter -from fastapi_versioning import version - -router = APIRouter(prefix="/test") - - -@router.get("/galaxy/") -@version(1) -def galaxy_says_v1(): - return "Hello" - - -@router.get("/galaxy/") -@version(2) -def galaxy_says_v2(): - return "Hi" diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index c7d8f978..09e10691 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -205,7 +205,7 @@ You can further customize API if you wish with API_CONFIG Block ``` [API_CONFIG] -export_path=exports/ # used to store export path +export_path=exports # used to store export path api_host=http://127.0.0.1 # you can define this if you have different host api_port=8000 max_area=100000 # max area to support for rawdata input diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 9cc8c7d2..8794f818 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -917,97 +917,77 @@ def extract_historical_data(self): return RawData.to_geojson(results) @staticmethod - def ogr_export(outputtype, query=None, export_temp_path=None, point_query=None, line_query=None, poly_query=None, dump_temp_file_path=None, binding_file_dir=None): + def ogr_export_shp(point_query, line_query, poly_query, working_dir, file_name): + """Function written to support ogr type extractions as well , In this way we will be able to support all file formats supported by Ogr , Currently it is slow when dataset gets bigger as compared to our own conversion method but rich in feature and data types even though it is slow""" + db_items = get_db_connection_params("RAW_DATA") + if point_query: + query_path=os.path.join(working_dir,'point.sql') + # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql + with open(query_path, 'w', encoding="UTF-8") as file: + file.write(point_query) + # standard file path for the generation + point_file_path=os.path.join(working_dir,f"{file_name}_point.shp") + # command for ogr2ogr to generate file + cmd = '''ogr2ogr -overwrite -f ESRI Shapefile {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=point_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + logging.debug("Calling ogr2ogr-Point Shapefile") + run_ogr2ogr_cmd(cmd) + # clear query file we don't need it anymore + os.remove(query_path) + + if line_query: + query_path=os.path.join(working_dir,'line.sql') + # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql + with open(query_path, 'w', encoding="UTF-8") as file: + file.write(line_query) + line_file_path=os.path.join(working_dir,f"{file_name}_line.shp") + cmd = '''ogr2ogr -overwrite -f ESRI Shapefile {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=line_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + logging.debug("Calling ogr2ogr-Line Shapefile") + 
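Taken together, the snapshot endpoint now only queues the Celery task and returns a `task_id` with a `/tasks/status/...` link, and the new `API/tasks.py` router resolves that id through `AsyncResult`. A sketch of how a client drives the two-step flow; the host, versioned paths and request body are abbreviated and illustrative rather than the exact schema:

```
import time
import requests

base = "http://127.0.0.1:8000/v2"                          # api_host:api_port from config
body = {
    "geometry": {"type": "Polygon",
                 "coordinates": [[[83.96, 28.19], [83.97, 28.19], [83.97, 28.20],
                                  [83.96, 28.20], [83.96, 28.19]]]},
    "output_type": "GeoJSON",
    "file_name": "pokhara_sample",
}

task = requests.post(f"{base}/raw-data/current-snapshot/", json=body).json()
# -> {"task_id": "...", "track_link": "/tasks/status/<task_id>/"}

status = requests.get(f"{base}{task['track_link']}").json()
while status["status"] not in ("SUCCESS", "FAILURE"):      # poll until the worker finishes
    time.sleep(5)
    status = requests.get(f"{base}{task['track_link']}").json()
print(status["result"])                                     # download_url etc. on success
```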
run_ogr2ogr_cmd(cmd) + # clear query file we don't need it anymore + os.remove(query_path) + + if poly_query: + query_path=os.path.join(working_dir,'poly.sql') + poly_file_path=os.path.join(working_dir,f"{file_name}_poly.shp") + # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql + with open(query_path, 'w', encoding="UTF-8") as file: + file.write(poly_query) + cmd = '''ogr2ogr -overwrite -f ESRI Shapefile {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=poly_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=poly_query_path) + logging.debug("Calling ogr2ogr-Poly Shapefile") + run_ogr2ogr_cmd(cmd) + # clear query file we don't need it anymore + os.remove(query_path) + + @staticmethod + def ogr_export(query, outputtype, working_dir, dump_temp_path): """Function written to support ogr type extractions as well , In this way we will be able to support all file formats supported by Ogr , Currently it is slow when dataset gets bigger as compared to our own conversion method but rich in feature and data types even though it is slow""" db_items = get_db_connection_params("RAW_DATA") # format query if it has " in string" - formatted_query = '' - if query: - formatted_query = query.replace('"', '\\"') + query_path=os.path.join(working_dir,'export_query.sql') + # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql + with open(query_path, 'w', encoding="UTF-8") as file: + file.write(query) # for mbtiles we need additional input as well i.e. 
minzoom and maxzoom , setting default at max=22 and min=10 if outputtype == RawDataOutputType.MBTILES.value: - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" -dsco MINZOOM=10 -dsco MAXZOOM=22 {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=export_temp_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) - - elif outputtype == RawDataOutputType.SHAPEFILE.value: - # if it is shapefile it needs different logic for point,line and polygon - file_paths = [] - outputtype = "ESRI Shapefile" - if point_query: - query_path = f"""{dump_temp_file_path}_point.sql""" - - # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql - with open(query_path, 'w') as file: - file.write(point_query) - # standard file path for the generation - point_file_path = f"""{dump_temp_file_path}_point.shp""" - # command for ogr2ogr to generate file - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=point_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) - logging.debug("Calling ogr2ogr-Point Shapefile") - run_ogr2ogr_cmd(cmd, binding_file_dir) - # clear query file we don't need it anymore - os.remove(query_path) - - file_paths.append(point_file_path) - # need filepath to zip in to file and clear them after zipping - file_paths.append(f"""{dump_temp_file_path}_point.shx""") - # file_paths.append(f"""{dump_temp_file_path}_point.cpg""") - file_paths.append(f"""{dump_temp_file_path}_point.dbf""") - file_paths.append(f"""{dump_temp_file_path}_point.prj""") - if line_query: - query_path = f"""{dump_temp_file_path}_line.sql""" - - # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql - with open(query_path, 'w') as file: - file.write(line_query) - - line_file_path = f"""{dump_temp_file_path}_line.shp""" - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=line_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) - logging.debug("Calling ogr2ogr-Line Shapefile") - run_ogr2ogr_cmd(cmd, binding_file_dir) - # clear query file we don't need it anymore - os.remove(query_path) - - file_paths.append(line_file_path) - file_paths.append(f"""{dump_temp_file_path}_line.shx""") - # file_paths.append(f"""{dump_temp_file_path}_line.cpg""") - file_paths.append(f"""{dump_temp_file_path}_line.dbf""") - file_paths.append(f"""{dump_temp_file_path}_line.prj""") - if poly_query: - - poly_file_path = f"""{dump_temp_file_path}_poly.shp""" - poly_query_path = f"""{dump_temp_file_path}_poly.sql""" - - # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql - with open(poly_query_path, 'w') as file: - file.write(poly_query) - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} 
PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=poly_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=poly_query_path) - logging.debug("Calling ogr2ogr-Poly Shapefile") - run_ogr2ogr_cmd(cmd, binding_file_dir) - # clear query file we don't need it anymore - os.remove(poly_query_path) - file_paths.append(poly_file_path) - file_paths.append(f"""{dump_temp_file_path}_poly.shx""") - # file_paths.append(f"""{dump_temp_file_path}_poly.cpg""") - file_paths.append(f"""{dump_temp_file_path}_poly.dbf""") - file_paths.append(f"""{dump_temp_file_path}_poly.prj""") - return file_paths - - elif outputtype == RawDataOutputType.FlatGeobuf.value: - cmd = '''ogr2ogr -overwrite -f FlatGeobuf {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( - export_path=export_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) - run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=False) - return export_path + cmd = '''ogr2ogr -overwrite -f MBTILES -dsco MINZOOM=10 -dsco MAXZOOM=22 {export_path} PG:"host={host} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=dump_temp_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + run_ogr2ogr_cmd(cmd) - else: - # if it is not shapefile use standard ogr2ogr with their output format , will be useful for kml - cmd = '''ogr2ogr -overwrite -f \"{outputtype}\" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql "{pg_sql_select}" -progress'''.format( - outputtype=outputtype, export_path=export_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=formatted_query) - run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=False) - return export_path + if outputtype == RawDataOutputType.FlatGeobuf.value: + cmd = '''ogr2ogr -overwrite -f FlatGeobuf {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=dump_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + run_ogr2ogr_cmd(cmd) + + if outputtype == RawDataOutputType.KML.value: + cmd = '''ogr2ogr -overwrite -f KML {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=dump_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + run_ogr2ogr_cmd(cmd) + + # clear query file we don't need it anymore + os.remove(query_path) @staticmethod def query2geojson(con, extraction_query, dump_temp_file_path): @@ -1067,7 +1047,8 @@ def extract_current_data(self, exportname): exportname: takes filename as argument to create geojson file passed from 
routers Returns: - _file_path_: geojson file location path + geom_area: area of polygon supplied + working_dir: dir where results are saved """ # first check either geometry needs grid or not for querying grid_id, geometry_dump, geom_area = RawData.get_grid_id( @@ -1079,15 +1060,12 @@ def extract_current_data(self, exportname): output_type = self.params.output_type # Check whether the export path exists or not - isExist = os.path.exists(export_path) - if not isExist: + working_dir=os.path.join(export_path, exportname) + if not os.path.exists(working_dir): # Create a exports directory because it does not exist - os.makedirs(export_path) - root_dir_file = export_path - path = f"""{export_path}{exportname}/""" - os.makedirs(path) + os.makedirs(working_dir) # create file path with respect to of output type - dump_temp_file_path = f"""{path}{exportname}.{output_type.lower()}""" + dump_temp_file_path = os.path.join(working_dir, f"{self.params.file_name if self.params.file_name else 'Export'}.{output_type.lower()}") try: # currently we have only geojson binding function written other than that we have depend on ogr if output_type == RawDataOutputType.GEOJSON.value: @@ -1096,17 +1074,12 @@ def extract_current_data(self, exportname): elif output_type == RawDataOutputType.SHAPEFILE.value: point_query, line_query, poly_query, point_schema, line_schema, poly_schema = extract_geometry_type_query( self.params, ogr_export=True) - # point_query, line_query, poly_query, point_schema, line_schema, poly_schema = extract_geometry_type_query( - # self.params,ogr_export=True) - dump_temp_file_path = f"""{path}{exportname}""" - filepaths = RawData.ogr_export(outputtype=output_type, point_query=point_query, line_query=line_query, - poly_query=poly_query, dump_temp_file_path=dump_temp_file_path, binding_file_dir=path) # using ogr2ogr - # filepaths = RawData.query2shapefile(self.con, point_query, line_query, poly_query, point_schema, line_schema, poly_schema, dump_temp_file_path) #using fiona - return filepaths, geom_area, root_dir_file + RawData.ogr_export_shp(point_query=point_query, line_query=line_query, + poly_query=poly_query, working_dir=working_dir, file_name=self.params.file_name if self.params.file_name else 'Export') # using ogr2ogr else: - filepaths = RawData.ogr_export(query=raw_currentdata_extraction_query(self.params, grid_id, geometry_dump, ogr_export=True), - export_temp_path=dump_temp_file_path, outputtype=output_type, binding_file_dir=path) # uses ogr export to export - return [dump_temp_file_path], geom_area, root_dir_file + RawData.ogr_export(query=raw_currentdata_extraction_query(self.params, grid_id, geometry_dump, ogr_export=True), + outputtype=output_type, dump_temp_path=dump_temp_file_path, working_dir=working_dir ) # uses ogr export to export + return geom_area, working_dir except Exception as ex: logging.error(ex) raise ex @@ -1125,7 +1098,7 @@ def check_status(self): return str(behind_time[0][0]) -def run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=True): +def run_ogr2ogr_cmd(cmd): """Runs command and monitors the file size until the process runs Args: @@ -1133,44 +1106,26 @@ def run_ogr2ogr_cmd(cmd, binding_file_dir, use_limit=True): binding_file_dir (_type_): _description_ Raises: - ValueError: File exceed 4GB limit - ValueError: Binding failed + Exception: If process gets failed """ try: # start_time=time.time() logging.debug("Calling command : %s", cmd) - process = subprocess.Popen( + process = subprocess.check_output( cmd, - stdout=subprocess.PIPE, - stderr=subprocess.PIPE, + 
stderr=subprocess.STDOUT, shell=True, - preexec_fn=os.setsid + preexec_fn=os.setsid, + timeout=60*60*2 #setting timeout of 2 hour ) - if use_limit: - while process.poll() is None: - # if (time.time()-start_time)/60 > 25 : - # raise ValueError("Shapefile Exceed Limit export") - size = 0 - for ele in os.scandir(binding_file_dir): - size += os.path.getsize(ele) - # print(size/1000000) # in MB - if size / 1000000 > shp_limit: - logging.warn( - f"Killing ogr2ogr because it exceed {shp_limit} MB...") - # process.kill() - # os.killpg(os.getpgid(process.pid), signal.SIGTERM) # Send the signal to all the process groups - # shutil.rmtree(binding_file_dir) - raise HTTPException( - status_code=404, detail=f"Limit Exceed {shp_limit} MB Limit") - - logging.debug(process.stdout.read()) + logging.debug(process) except Exception as ex: logging.error(ex) - process.kill() - # Send the signal to all the process groups - os.killpg(os.getpgid(process.pid), signal.SIGTERM) - if os.path.exists(binding_file_dir): - shutil.rmtree(binding_file_dir) + # process.kill() + # # Send the signal to all the process groups + # os.killpg(os.getpgid(process.pid), signal.SIGTERM) + # if os.path.exists(binding_file_dir): + # shutil.rmtree(binding_file_dir) raise ex diff --git a/src/galaxy/config.py b/src/galaxy/config.py index 2013e62c..f102816d 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -67,9 +67,10 @@ export_path = config.get('API_CONFIG', 'export_path', fallback=None) if export_path is None: - export_path = "exports/" -if export_path.endswith("/") is False: - export_path = f"""{export_path}/""" + export_path = "exports" +if not os.path.exists(export_path): + # Create a exports directory because it does not exist + os.makedirs(export_path) shp_limit = int(config.get('API_CONFIG', 'shp_limit', fallback=4096)) diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index 4b806aae..a33d52bd 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -480,7 +480,7 @@ class RawDataOutputType (Enum): KML = "kml" SHAPEFILE = "shp" FlatGeobuf = "fgb" - MBTILES = "MBTILES" # fully experimental for now + MBTILES = "mbtiles" # fully experimental for now class HashtagParams(BaseModel): From bbd3013f8a8e1e74f2012131d3bce6c1beb7d11b Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 16:56:26 +0545 Subject: [PATCH 120/153] removed shp limit since we don't need it we have queue in place , moved tasks --- API/api_worker.py | 4 +--- docs/CONFIG_DOC.md | 1 - src/galaxy/app.py | 10 +++++----- src/galaxy/config.py | 2 -- src/galaxy/validation/models.py | 9 +++++++++ 5 files changed, 15 insertions(+), 11 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 7c6e1f5f..ca8d04fd 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -31,9 +31,7 @@ def process_raw_data(self, params): try: start_time = dt.now() - bind_zip=True - if params.output_type == RawDataOutputType.FlatGeobuf.value: - bind_zip=False + bind_zip=params.bind_zip # unique id for zip file and geojson for each export exportname = f"{format_file_name_str(params.file_name) if params.file_name else 'Export'}_{str(self.request.id)}_{params.output_type}" diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 09e10691..7334856d 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -212,7 +212,6 @@ max_area=100000 # max area to support for rawdata input use_connection_pooling=True # default it will not use connection pooling but you can configure api to use to for psycopg2 
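The watchdog loop that polled the output directory against `shp_limit` is dropped in favour of a single blocking `subprocess.check_output` call with a hard two-hour timeout, since the Celery queue now bounds how many exports run at once. A small sketch of that call pattern and its failure modes (the command is a stand-in for the real export command):

```
import subprocess
from subprocess import CalledProcessError, TimeoutExpired

cmd = "ogr2ogr --version"                      # stand-in for the real export command

try:
    output = subprocess.check_output(
        cmd,
        stderr=subprocess.STDOUT,              # progress and errors end up in `output`
        shell=True,
        timeout=60 * 60 * 2,                   # abort exports running longer than 2 hours
    )
    print(output.decode())
except TimeoutExpired:
    print("export exceeded the 2 hour limit")
except CalledProcessError as err:
    print("ogr2ogr failed:", err.output.decode())
```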
connections log_level=info #options are info,debug,warning,error env=dev # default is dev , supported values are dev and prod -shp_limit=6000 # in mb default is 4096 export_rate_limit=5 # no of requests per minute - default is 5 requests per minute ``` Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 8794f818..9985d6cb 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -20,7 +20,7 @@ import os import sys import threading -from src.galaxy.config import get_db_connection_params, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME, level, logger as logging, export_path, use_connection_pooling, shp_limit +from src.galaxy.config import get_db_connection_params, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME, level, logger as logging, export_path, use_connection_pooling from src.galaxy.validation.models import Source from psycopg2 import connect, sql from psycopg2.extras import DictCursor @@ -928,7 +928,7 @@ def ogr_export_shp(point_query, line_query, poly_query, working_dir, file_name): # standard file path for the generation point_file_path=os.path.join(working_dir,f"{file_name}_point.shp") # command for ogr2ogr to generate file - cmd = '''ogr2ogr -overwrite -f ESRI Shapefile {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + cmd = '''ogr2ogr -overwrite -f "ESRI Shapefile" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( export_path=point_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) logging.debug("Calling ogr2ogr-Point Shapefile") run_ogr2ogr_cmd(cmd) @@ -941,7 +941,7 @@ def ogr_export_shp(point_query, line_query, poly_query, working_dir, file_name): with open(query_path, 'w', encoding="UTF-8") as file: file.write(line_query) line_file_path=os.path.join(working_dir,f"{file_name}_line.shp") - cmd = '''ogr2ogr -overwrite -f ESRI Shapefile {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + cmd = '''ogr2ogr -overwrite -f "ESRI Shapefile" {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( export_path=line_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) logging.debug("Calling ogr2ogr-Line Shapefile") run_ogr2ogr_cmd(cmd) @@ -954,8 +954,8 @@ def ogr_export_shp(point_query, line_query, poly_query, working_dir, file_name): # writing to .sql to pass in ogr2ogr because we don't want to pass too much argument on command with sql with open(query_path, 'w', encoding="UTF-8") as file: file.write(poly_query) - cmd = '''ogr2ogr -overwrite -f ESRI Shapefile {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( - export_path=poly_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=poly_query_path) + cmd = '''ogr2ogr -overwrite -f "ESRI Shapefile" {export_path} PG:"host={host} 
port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=poly_file_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) logging.debug("Calling ogr2ogr-Poly Shapefile") run_ogr2ogr_cmd(cmd) # clear query file we don't need it anymore diff --git a/src/galaxy/config.py b/src/galaxy/config.py index f102816d..47ca61ce 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -72,8 +72,6 @@ # Create a exports directory because it does not exist os.makedirs(export_path) -shp_limit = int(config.get('API_CONFIG', 'shp_limit', fallback=4096)) - AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME = None, None, None # check either to use connection pooling or not diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index a33d52bd..6759240c 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -30,6 +30,8 @@ from area import area import re +from tomlkit import boolean + from ..config import config MAX_POLYGON_AREA = 5000 # km^2 @@ -586,8 +588,15 @@ class RawDataCurrentParams(BaseModel): file_name: Optional[str] = None geometry: Union[Polygon, MultiPolygon] filters: Optional[dict] = None + bind_zip: Optional[bool] = True geometry_type: Optional[List[SupportedGeometryFilters]] = None + @validator("bind_zip", allow_reuse=True) + def check_bind_option(cls, value, values): + if value is False and values.get("output_type")=='shp': + raise ValueError("Can't deliver Shapefile without zip , Remove bind_zip paramet or set it to True") + return value + @validator("filters", allow_reuse=True) def check_value(cls, value, values): """Checks given fields""" From 1a3a6f4033c1ec315d4715f5ebb735f75d0c6a9e Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 17:45:54 +0545 Subject: [PATCH 121/153] fixed bug on filename --- API/api_worker.py | 3 ++- src/galaxy/app.py | 2 +- 2 files changed, 3 insertions(+), 2 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index ca8d04fd..31acbde1 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -33,7 +33,8 @@ def process_raw_data(self, params): start_time = dt.now() bind_zip=params.bind_zip # unique id for zip file and geojson for each export - exportname = f"{format_file_name_str(params.file_name) if params.file_name else 'Export'}_{str(self.request.id)}_{params.output_type}" + params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Export' + exportname = f"{params.file_name}_{str(self.request.id)}_{params.output_type}" logging.info("Request %s received", exportname) diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 9985d6cb..6ff8d55e 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -977,7 +977,7 @@ def ogr_export(query, outputtype, working_dir, dump_temp_path): run_ogr2ogr_cmd(cmd) if outputtype == RawDataOutputType.FlatGeobuf.value: - cmd = '''ogr2ogr -overwrite -f FlatGeobuf {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + cmd = '''ogr2ogr -overwrite -f FlatGeobuf {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress VERIFY_BUFFERS=NO'''.format( export_path=dump_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), 
password=db_items.get('password'), pg_sql_select=query_path) run_ogr2ogr_cmd(cmd) From 12a198ef979dcb28e39624e1f19988b26bea905d Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 18:43:30 +0545 Subject: [PATCH 122/153] added kml and flatgeobuff file format --- API/api_worker.py | 4 +- API/raw_data.py | 110 +++++++++++----------------- docs/CONFIG_DOC.md | 1 + src/galaxy/config.py | 4 +- src/galaxy/query_builder/builder.py | 23 +++--- src/galaxy/validation/models.py | 21 ++++-- 6 files changed, 75 insertions(+), 88 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 31acbde1..9d6acd54 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -12,7 +12,7 @@ from src.galaxy.query_builder.builder import format_file_name_str from src.galaxy.validation.models import RawDataOutputType from src.galaxy.app import RawData, S3FileTransfer -from src.galaxy.config import use_s3_to_upload, logger as logging, config +from src.galaxy.config import use_s3_to_upload, logger as logging, config, allow_bind_zip_filter celery = Celery(__name__) celery.conf.broker_url = config.get( @@ -31,7 +31,7 @@ def process_raw_data(self, params): try: start_time = dt.now() - bind_zip=params.bind_zip + bind_zip=params.bind_zip if allow_bind_zip_filter else True # unique id for zip file and geojson for each export params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Export' exportname = f"{params.file_name}_{str(self.request.id)}_{params.output_type}" diff --git a/API/raw_data.py b/API/raw_data.py index c4e318fa..27cf515d 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -39,7 +39,7 @@ from src.galaxy.validation.models import RawDataCurrentParams, RawDataOutputType from src.galaxy.app import RawData, S3FileTransfer from .api_worker import process_raw_data -from src.galaxy.config import export_rate_limit, use_s3_to_upload, logger as logging, config, limiter +from src.galaxy.config import export_rate_limit, use_s3_to_upload, logger as logging, config, limiter, allow_bind_zip_filter router = APIRouter(prefix="/raw-data") @@ -59,7 +59,7 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks params (RawDataCurrentParams): { - "outputType": "GeoJSON", + "outputType": "GeoJSON", # supported are : kml,shp,fgb "fileName": "string", "geometry": { # only polygon is supported ** required field ** "coordinates": [ @@ -90,7 +90,8 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks }, "geometryType": [ "point","line","polygon" - ] + ], + joinFilterType:"OR" # options are and / or . 
'or' by default -- applies condition for filters **optional } background_tasks (BackgroundTasks): task to cleanup the files produced during export request (Request): request instance @@ -209,80 +210,54 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks """ # def get_current_data(params:RawDataCurrentParams,background_tasks: BackgroundTasks, user_data=Depends(login_required)): # this will use osm login makes it restrict login start_time = dt.now() - if params.output_type is None: # if no ouput type is supplied default is geojson output - params.output_type = RawDataOutputType.GEOJSON.value - + bind_zip=params.bind_zip if allow_bind_zip_filter else True # unique id for zip file and geojson for each export - if params.file_name: - # need to format string from space to _ because it is filename , may be we need to filter special character as well later on - formatted_file_name = format_file_name_str(params.file_name) - # exportname = f"{formatted_file_name}_{datetime.now().isoformat()}_{str(uuid4())}" - exportname = f"""{formatted_file_name}_{str(uuid4())}_{params.output_type}""" # disabled date for now - - else: - # exportname = f"Raw_Export_{datetime.now().isoformat()}_{str(uuid4())}" - exportname = f"Raw_Export_{str(uuid4())}_{params.output_type}" + params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Export' + exportname = f"{params.file_name}_{str(str(uuid4()))}_{params.output_type}" logging.info("Request %s received", exportname) - dump_temp_file, geom_area, root_dir_file = RawData( - params).extract_current_data(exportname) - path = f"""{root_dir_file}{exportname}/""" - - if os.path.exists(path) is False: - return JSONResponse( - status_code=400, - content={"Error": "Request went too big"} - ) - - logging.debug('Zip Binding Started !') - # saving file in temp directory instead of memory so that zipping file will not eat memory - zip_temp_path = f"""{root_dir_file}{exportname}.zip""" - zf = zipfile.ZipFile(zip_temp_path, "w", zipfile.ZIP_DEFLATED) - - directory = pathlib.Path(path) - for file_path in directory.iterdir(): - zf.write(file_path, arcname=file_path.name) - - # Compressing geojson file - zf.writestr("clipping_boundary.geojson", - orjson.dumps(dict(params.geometry))) - - zf.close() - logging.debug('Zip Binding Done !') + geom_area, working_dir = RawData(params).extract_current_data(exportname) inside_file_size = 0 - for temp_file in dump_temp_file: - # clearing tmp geojson file since it is already dumped to zip file we don't need it anymore - if os.path.exists(temp_file): - inside_file_size += os.path.getsize(temp_file) - - # remove the file that are just binded to zip file , we no longer need to store it - background_tasks.add_task(remove_file, path) - + if bind_zip: + logging.debug('Zip Binding Started !') + # saving file in temp directory instead of memory so that zipping file will not eat memory + upload_file_path = os.path.join(working_dir,os.pardir,f"{exportname}.zip") + + zf = zipfile.ZipFile(upload_file_path, "w", zipfile.ZIP_DEFLATED) + for file_path in pathlib.Path(working_dir).iterdir(): + zf.write(file_path, arcname=file_path.name) + inside_file_size += os.path.getsize(file_path) + + # Compressing geojson file + zf.writestr("clipping_boundary.geojson", + orjson.dumps(dict(params.geometry))) + + zf.close() + logging.debug('Zip Binding Done !') + else: + for file_path in pathlib.Path(working_dir).iterdir(): + upload_file_path=file_path + inside_file_size += os.path.getsize(file_path) + break # only take one 
file inside dir , if contains many it should be inside zip # check if download url will be generated from s3 or not from config if use_s3_to_upload: file_transfer_obj = S3FileTransfer() - download_url = file_transfer_obj.upload(zip_temp_path, exportname) - # watches the status code of the link provided and deletes the file if it is 200 - background_tasks.add_task(watch_s3_upload, download_url, zip_temp_path) + download_url = file_transfer_obj.upload(upload_file_path, exportname, file_suffix='zip' if bind_zip else params.output_type.lower()) else: - - # getting from config in case api and frontend is not hosted on same url - client_host = config.get( - "API_CONFIG", "api_host", fallback=f"""{request.url.scheme}://{request.client.host}""") - client_port = config.get("API_CONFIG", "api_port", fallback=8000) - - if client_port: - download_url = f"""{client_host}:{client_port}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! - else: - download_url = f"""{client_host}/v1/exports/{exportname}.zip""" # disconnected download portion from this endpoint because when there will be multiple hits at a same time we don't want function to get stuck waiting for user to download the file and deliver the response , we want to reduce waiting time and free function ! + download_url = str(upload_file_path) # give the static file download url back to user served from fastapi static export path # getting file size of zip , units are in bytes converted to mb in response - zip_file_size = os.path.getsize(zip_temp_path) + zip_file_size = os.path.getsize(upload_file_path) + # watches the status code of the link provided and deletes the file if it is 200 + if use_s3_to_upload: + background_tasks.add_task(watch_s3_upload,download_url, upload_file_path) + if use_s3_to_upload or bind_zip: + #remove working dir from the machine , if its inside zip / uploaded we no longer need it + background_tasks.add_task(remove_file,working_dir) response_time = dt.now() - start_time response_time_str = str(response_time) - logging.info( - f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") + logging.info(f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") return {"download_url": download_url, "file_name": exportname, "response_time": response_time_str, "query_area": f"""{geom_area} Sq Km """, "binded_file_size": f"""{round(inside_file_size/1000000,2)} MB""", "zip_file_size_bytes": {zip_file_size}} @@ -337,7 +312,7 @@ def watch_s3_upload(url: str, path: str) -> None: @version(2) def get_current_snapshot_of_osm_data( params: RawDataCurrentParams, request: Request): - """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features + """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features. 
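The docstrings in these endpoints describe the JSON body the snapshot routes expect (outputType, fileName, geometry, geometryType, joinFilterType, and bindZip when the config allows it). A minimal client-side sketch of such a call follows; it assumes the API is running locally on port 8000 as in the README and that the versioned v1 route is mounted at /v1/raw-data/current-snapshot/, so check /latest/docs on your deployment for the exact path and field casing.

```python
# Minimal sketch of posting a snapshot request with the fields documented in the
# docstring above. Host, port and route are assumptions taken from the README;
# verify them against /latest/docs before relying on this.
import requests

payload = {
    "fileName": "Pokhara_buildings",
    "outputType": "GeoJSON",      # kml, shp and fgb are also listed as supported
    "joinFilterType": "OR",       # optional, controls how multiple tag filters combine
    "geometryType": ["point", "polygon"],
    "geometry": {                 # an arbitrary small polygon, purely for illustration
        "type": "Polygon",
        "coordinates": [[
            [83.96, 28.19], [83.99, 28.19], [83.99, 28.21], [83.96, 28.21], [83.96, 28.19]
        ]],
    },
}

resp = requests.post(
    "http://127.0.0.1:8000/v1/raw-data/current-snapshot/", json=payload, timeout=600
)
resp.raise_for_status()
print(resp.json().get("download_url"))  # v1 replies with the download URL once the export is built
```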
API Supports Flatgeobuff(fgb) output format which will be the fatest output format among all Steps to Run Snapshot : @@ -383,7 +358,8 @@ def get_current_snapshot_of_osm_data( }, "geometryType": [ "point","line","polygon" - ] + ], + joinFilterType:"OR" # options are and / or , 'or' by default -- applies condition for filters **optional } background_tasks (BackgroundTasks): task to cleanup the files produced during export request (Request): request instance @@ -397,7 +373,7 @@ def get_current_snapshot_of_osm_data( Sample Query : 1. Sample query to extract point and polygon features that are marked building=* with name attribute { - "outputType": "GeoJSON", + "outputType": "fgb", "fileName": "Pokhara_buildings", "geometry": { "type": "Polygon", diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 7334856d..587dbfb7 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -212,6 +212,7 @@ max_area=100000 # max area to support for rawdata input use_connection_pooling=True # default it will not use connection pooling but you can configure api to use to for psycopg2 connections log_level=info #options are info,debug,warning,error env=dev # default is dev , supported values are dev and prod +allow_bind_zip_filter=true # default is false, this will allow people to choose either their output should be zipped or not . Default all output will be zipped export_rate_limit=5 # no of requests per minute - default is 5 requests per minute ``` Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block diff --git a/src/galaxy/config.py b/src/galaxy/config.py index 47ca61ce..d5bd99de 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -71,7 +71,9 @@ if not os.path.exists(export_path): # Create a exports directory because it does not exist os.makedirs(export_path) - +allow_bind_zip_filter=config.get('API_CONFIG', 'allow_bind_zip_filter', fallback=None) +if allow_bind_zip_filter: + allow_bind_zip_filter=True if allow_bind_zip_filter.lower()=='true' else False AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME = None, None, None # check either to use connection pooling or not diff --git a/src/galaxy/query_builder/builder.py b/src/galaxy/query_builder/builder.py index 3935193c..d374ec6f 100644 --- a/src/galaxy/query_builder/builder.py +++ b/src/galaxy/query_builder/builder.py @@ -900,7 +900,7 @@ def create_column_filter(columns, create_schema=False): return """osm_id ,tags::text as tags,changeset,timestamp::text,geom""" # this is default attribute that we will deliver to user if user defines his own attribute column then those will be appended with osm_id only -def generate_tag_filter_query(filter): +def generate_tag_filter_query(filter,params): incoming_filter = [] for key, value in filter.items(): @@ -919,7 +919,10 @@ def generate_tag_filter_query(filter): f"""tags ->> '{key.strip()}' = '{value[0].strip()}'""") else: incoming_filter.append(f"""tags ? 
'{key.strip()}'""") - tag_filter = " OR ".join(incoming_filter) + if params.join_filter_type: + tag_filter = f" {params.join_filter_type} ".join(incoming_filter) + else: + tag_filter = " OR ".join(incoming_filter) return tag_filter @@ -942,7 +945,7 @@ def extract_geometry_type_query(params, ogr_export=False): select_condition, schema = create_column_filter( master_attribute_filter, create_schema=True) if master_tag_filter: - attribute_filter = generate_tag_filter_query(master_tag_filter) + attribute_filter = generate_tag_filter_query(master_tag_filter, params) if params.geometry_type is None: # fix me params.geometry_type = ['point', 'line', 'polygon'] @@ -958,7 +961,7 @@ def extract_geometry_type_query(params, ogr_export=False): where {geom_filter}""" if point_tag_filter: - attribute_filter = generate_tag_filter_query(point_tag_filter) + attribute_filter = generate_tag_filter_query(point_tag_filter, params) if attribute_filter: query_point += f""" and ({attribute_filter})""" point_schema = schema @@ -984,7 +987,7 @@ def extract_geometry_type_query(params, ogr_export=False): where {geom_filter}""" if line_tag_filter: - attribute_filter = generate_tag_filter_query(line_tag_filter) + attribute_filter = generate_tag_filter_query(line_tag_filter, params) if attribute_filter: query_ways_line += f""" and ({attribute_filter})""" query_relations_line += f""" and ({attribute_filter})""" @@ -1013,7 +1016,7 @@ def extract_geometry_type_query(params, ogr_export=False): where {geom_filter}""" if poly_tag_filter: - attribute_filter = generate_tag_filter_query(poly_tag_filter) + attribute_filter = generate_tag_filter_query(poly_tag_filter, params) if attribute_filter: query_ways_poly += f""" and ({attribute_filter})""" query_relations_poly += f""" and ({attribute_filter})""" @@ -1120,17 +1123,17 @@ def raw_currentdata_extraction_query(params, g_id, geometry_dump, ogr_export=Fal point_attribute_filter) if tags: if master_tag_filter: # if master tag is supplied then other tags should be ignored and master tag will be used - master_tag = generate_tag_filter_query(master_tag_filter) + master_tag = generate_tag_filter_query(master_tag_filter, params) point_tag = master_tag line_tag = master_tag poly_tag = master_tag else: if point_tag_filter: - point_tag = generate_tag_filter_query(point_tag_filter) + point_tag = generate_tag_filter_query(point_tag_filter, params) if line_tag_filter: - line_tag = generate_tag_filter_query(line_tag_filter) + line_tag = generate_tag_filter_query(line_tag_filter, params) if poly_tag_filter: - poly_tag = generate_tag_filter_query(poly_tag_filter) + poly_tag = generate_tag_filter_query(poly_tag_filter, params) # condition for geometry types if params.geometry_type is None: diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index 6759240c..336043d2 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -32,7 +32,7 @@ from tomlkit import boolean -from ..config import config +from src.galaxy.config import config, allow_bind_zip_filter MAX_POLYGON_AREA = 5000 # km^2 @@ -582,20 +582,25 @@ def has_value(cls, value): """Checks if the value is supported""" return value in cls._value2member_map_ +class JoinFilterType (Enum): + OR = "OR" + AND ="AND" class RawDataCurrentParams(BaseModel): output_type: Optional[RawDataOutputType] = None file_name: Optional[str] = None geometry: Union[Polygon, MultiPolygon] filters: Optional[dict] = None - bind_zip: Optional[bool] = True + join_filter_type: Optional[JoinFilterType]=None geometry_type: 
Optional[List[SupportedGeometryFilters]] = None + if allow_bind_zip_filter: + bind_zip: Optional[bool] = True - @validator("bind_zip", allow_reuse=True) - def check_bind_option(cls, value, values): - if value is False and values.get("output_type")=='shp': - raise ValueError("Can't deliver Shapefile without zip , Remove bind_zip paramet or set it to True") - return value + @validator("bind_zip", allow_reuse=True) + def check_bind_option(cls, value, values): + if value is False and values.get("output_type")=='shp': + raise ValueError("Can't deliver Shapefile without zip , Remove bind_zip paramet or set it to True") + return value @validator("filters", allow_reuse=True) def check_value(cls, value, values): @@ -649,7 +654,7 @@ def check_geometry_area(cls, value, values): output_type = values.get("output_type") if output_type: # for mbtiles ogr2ogr does very worst job when area gets bigger we should write owr own or find better approach for larger area - if output_type is RawDataOutputType.MBTILES.value: + if output_type == RawDataOutputType.MBTILES.value: RAWDATA_CURRENT_POLYGON_AREA = 2 # we need to figure out how much tile we are generating before passing request on the basis of bounding box we can restrict user , right now relation contains whole country for now restricted to this area but can not query relation will take ages because that will intersect with country boundary : need to clip it if area_km2 > RAWDATA_CURRENT_POLYGON_AREA: raise ValueError( From 87cab1a1fdf488a2c274eab2ee492b23c056b458 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 18:54:55 +0545 Subject: [PATCH 123/153] updated doc ! --- API/raw_data.py | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/API/raw_data.py b/API/raw_data.py index 27cf515d..ce34e617 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -59,7 +59,7 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks params (RawDataCurrentParams): { - "outputType": "GeoJSON", # supported are : kml,shp,fgb + "outputType": "GeoJSON", # supported are : kml,shp,(FlatGeobuf)fgb "fileName": "string", "geometry": { # only polygon is supported ** required field ** "coordinates": [ @@ -312,7 +312,7 @@ def watch_s3_upload(url: str, path: str) -> None: @version(2) def get_current_snapshot_of_osm_data( params: RawDataCurrentParams, request: Request): - """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features. API Supports Flatgeobuff(fgb) output format which will be the fatest output format among all + """Generates the current raw OpenStreetMap data available on database based on the input geometry, query and spatial features. Steps to Run Snapshot : @@ -327,7 +327,7 @@ def get_current_snapshot_of_osm_data( params (RawDataCurrentParams): { - "outputType": "GeoJSON", + "outputType": "GeoJSON", # supports kml,(FlatGeobuf)fgb,shp "fileName": "string", "geometry": { # only polygon is supported ** required field ** "coordinates": [ From da59948848b6951c6072b7820bc9c46a8d83faa9 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 19:03:34 +0545 Subject: [PATCH 124/153] Updated the feature ! --- API/raw_data.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API/raw_data.py b/API/raw_data.py index ce34e617..d088419d 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -373,7 +373,7 @@ def get_current_snapshot_of_osm_data( Sample Query : 1. 
Sample query to extract point and polygon features that are marked building=* with name attribute { - "outputType": "fgb", + "outputType": "GeoJSON", "fileName": "Pokhara_buildings", "geometry": { "type": "Polygon", From 614d2d3056d9e8921456104f089b8c2a7f09e126 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 19:16:37 +0545 Subject: [PATCH 125/153] removed unnecessary library --- src/galaxy/validation/models.py | 2 -- 1 file changed, 2 deletions(-) diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index 336043d2..b8cae1e3 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -30,8 +30,6 @@ from area import area import re -from tomlkit import boolean - from src.galaxy.config import config, allow_bind_zip_filter MAX_POLYGON_AREA = 5000 # km^2 From 3f6b2b546f888bf5cd9711d92135fa63099e95d6 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Thu, 29 Sep 2022 19:18:14 +0545 Subject: [PATCH 126/153] added docstring for bind option --- src/galaxy/validation/models.py | 1 + 1 file changed, 1 insertion(+) diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index b8cae1e3..3932d0c0 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -596,6 +596,7 @@ class RawDataCurrentParams(BaseModel): @validator("bind_zip", allow_reuse=True) def check_bind_option(cls, value, values): + """checks if shp is selected along with bind to zip file""" if value is False and values.get("output_type")=='shp': raise ValueError("Can't deliver Shapefile without zip , Remove bind_zip paramet or set it to True") return value From 17a38d641c41e3d2cff6b89025a9766c9bc00a6b Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 09:57:21 +0545 Subject: [PATCH 127/153] changed limiter backend to redis #346 --- docs/CONFIG_DOC.md | 3 ++- src/galaxy/config.py | 5 ++++- 2 files changed, 6 insertions(+), 2 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 587dbfb7..94b4b5a0 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -212,7 +212,8 @@ max_area=100000 # max area to support for rawdata input use_connection_pooling=True # default it will not use connection pooling but you can configure api to use to for psycopg2 connections log_level=info #options are info,debug,warning,error env=dev # default is dev , supported values are dev and prod -allow_bind_zip_filter=true # default is false, this will allow people to choose either their output should be zipped or not . 
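The joinFilterType option documented above is threaded through generate_tag_filter_query in the query builder, so several tag conditions can be combined with AND instead of the default OR. The snippet below is a simplified, standalone sketch of that joining logic rather than the project's own function; it only illustrates what the parameter changes in the generated WHERE clause.

```python
# Simplified, standalone sketch of folding a tag-filter dict into a SQL condition
# joined by AND / OR, mirroring the intent of generate_tag_filter_query above.
def build_tag_filter(filters: dict, join_with: str = "OR") -> str:
    conditions = []
    for key, values in filters.items():
        if values and len(values) == 1:
            conditions.append(f"tags ->> '{key.strip()}' = '{values[0].strip()}'")
        elif values:
            quoted = ",".join(f"'{v.strip()}'" for v in values)
            conditions.append(f"tags ->> '{key.strip()}' IN ({quoted})")
        else:
            # an empty list means "the tag is present with any value"
            conditions.append(f"tags ? '{key.strip()}'")
    return f" {join_with} ".join(conditions)


print(build_tag_filter({"building": [], "name": []}, join_with="AND"))
# tags ? 'building' AND tags ? 'name'
print(build_tag_filter({"amenity": ["school", "college"]}))
# tags ->> 'amenity' IN ('school','college')
```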
Default all output will be zipped +allow_bind_zip_filter=true # option to configure export output zipped/unzipped Default all output will be zipped +limiter_storage_uri=redis://localhost:6379 # API uses redis as backend for rate limiting export_rate_limit=5 # no of requests per minute - default is 5 requests per minute ``` Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block diff --git a/src/galaxy/config.py b/src/galaxy/config.py index d5bd99de..dff8f0ee 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -35,7 +35,10 @@ config = ConfigParser() config.read(CONFIG_FILE_PATH) -limiter = Limiter(key_func=get_remote_address) # rate limiter for API requests +limiter_storage_uri = config.get( + "API_CONFIG", "limiter_storage_uri", fallback="redis://localhost:6379" +) +limiter = Limiter(key_func=get_remote_address, storage_uri=limiter_storage_uri) # rate limiter for API requests based on the remote ip address and redis as backend export_rate_limit = int(config.get("API_CONFIG", "export_rate_limit", fallback=5)) # get log level from config log_level = config.get("API_CONFIG", "log_level", fallback=None) From ff526118e76cb5d7f34ea6d55c226b7e4534b5ea Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 30 Sep 2022 11:54:18 +0545 Subject: [PATCH 128/153] Changed port for flower --- README.md | 6 +++--- 1 file changed, 3 insertions(+), 3 deletions(-) diff --git a/README.md b/README.md index 6e1e4813..1173c48d 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ celery --app API.api_worker worker --loglevel=INFO API uses flower for monitoring the Celery distributed queue. Run this command on different shell , if you are running redis on same machine your broker could be ```redis://localhost:6379/``` ``` -celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ +celery --app API.api_worker flower --port=5000 --broker=redis://redis:6379/ ``` ### 6. Navigate to Fast API Docs to get details about API Endpoint @@ -87,10 +87,10 @@ After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) http://127.0.0.1:8000/latest/docs ``` -Flower dashboard should be available on 5550 localhost port. +Flower dashboard should be available on 5000 localhost port. 
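With limiter_storage_uri pointing at Redis, the slowapi limiter created in src/galaxy/config.py keeps its counters in Redis rather than in process memory, so the per-minute export limit holds across multiple API workers. Below is a small sketch of wiring such a limiter into a FastAPI app; the /ping route and the app object are placeholders, not part of this project.

```python
# Sketch of a slowapi rate limiter backed by Redis, mirroring the Limiter(...) call
# added to src/galaxy/config.py above. The app and route here are placeholders.
from fastapi import FastAPI, Request
from slowapi import Limiter, _rate_limit_exceeded_handler
from slowapi.errors import RateLimitExceeded
from slowapi.util import get_remote_address

limiter = Limiter(key_func=get_remote_address, storage_uri="redis://localhost:6379")

app = FastAPI()
app.state.limiter = limiter
app.add_exception_handler(RateLimitExceeded, _rate_limit_exceeded_handler)


@app.get("/ping")
@limiter.limit("5/minute")   # analogous to export_rate_limit=5 in the config block above
def ping(request: Request):  # slowapi requires the Request argument to key the caller
    return {"status": "ok"}
```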
``` -http://127.0.0.1:5550/ +http://127.0.0.1:5000/ ``` ## Check API Installation From e0a2b85a04725d268c73a69adfaa8fb1ddf854bb Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 12:36:02 +0545 Subject: [PATCH 129/153] added geopackage file format --- API/raw_data.py | 4 ++-- src/galaxy/app.py | 8 ++++++-- src/galaxy/validation/models.py | 4 ++-- 3 files changed, 10 insertions(+), 6 deletions(-) diff --git a/API/raw_data.py b/API/raw_data.py index d088419d..eb651926 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -59,7 +59,7 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks params (RawDataCurrentParams): { - "outputType": "GeoJSON", # supported are : kml,shp,(FlatGeobuf)fgb + "outputType": "GeoJSON", # supported are : kml,shp,(FLATGEOBUF)fgb "fileName": "string", "geometry": { # only polygon is supported ** required field ** "coordinates": [ @@ -327,7 +327,7 @@ def get_current_snapshot_of_osm_data( params (RawDataCurrentParams): { - "outputType": "GeoJSON", # supports kml,(FlatGeobuf)fgb,shp + "outputType": "GeoJSON", # supports kml,(FLATGEOBUF)fgb,shp "fileName": "string", "geometry": { # only polygon is supported ** required field ** "coordinates": [ diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 6ff8d55e..1f7a33e9 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -976,8 +976,8 @@ def ogr_export(query, outputtype, working_dir, dump_temp_path): export_path=dump_temp_path, host=db_items.get('host'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) run_ogr2ogr_cmd(cmd) - if outputtype == RawDataOutputType.FlatGeobuf.value: - cmd = '''ogr2ogr -overwrite -f FlatGeobuf {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress VERIFY_BUFFERS=NO'''.format( + if outputtype == RawDataOutputType.FLATGEOBUF.value: + cmd = '''ogr2ogr -overwrite -f FLATGEOBUF {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress VERIFY_BUFFERS=NO'''.format( export_path=dump_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) run_ogr2ogr_cmd(cmd) @@ -986,6 +986,10 @@ def ogr_export(query, outputtype, working_dir, dump_temp_path): export_path=dump_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) run_ogr2ogr_cmd(cmd) + if outputtype == RawDataOutputType.GEOPACKAGE.value: + cmd = '''ogr2ogr -overwrite -f GPKG {export_path} PG:"host={host} port={port} user={username} dbname={db} password={password}" -sql @"{pg_sql_select}" -progress'''.format( + export_path=dump_temp_path, host=db_items.get('host'), port=db_items.get('port'), username=db_items.get('user'), db=db_items.get('database'), password=db_items.get('password'), pg_sql_select=query_path) + run_ogr2ogr_cmd(cmd) # clear query file we don't need it anymore os.remove(query_path) diff --git a/src/galaxy/validation/models.py b/src/galaxy/validation/models.py index 3932d0c0..6d0e1a2b 100644 --- a/src/galaxy/validation/models.py +++ b/src/galaxy/validation/models.py @@ -479,9 +479,9 @@ class RawDataOutputType (Enum): GEOJSON = "GeoJSON" KML = "kml" SHAPEFILE = "shp" - FlatGeobuf = "fgb" + FLATGEOBUF = "fgb" MBTILES = "mbtiles" # 
fully experimental for now - + GEOPACKAGE = "gpkg" class HashtagParams(BaseModel): hashtags: Optional[List[str]] From 1451d0e6985ea436cf4b3fc4ca6364511802d748 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 12:50:16 +0545 Subject: [PATCH 130/153] read grid index threshold from config --- docs/CONFIG_DOC.md | 1 + src/galaxy/app.py | 4 ++-- src/galaxy/config.py | 6 +++++- 3 files changed, 8 insertions(+), 3 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 94b4b5a0..384a86c7 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -214,6 +214,7 @@ log_level=info #options are info,debug,warning,error env=dev # default is dev , supported values are dev and prod allow_bind_zip_filter=true # option to configure export output zipped/unzipped Default all output will be zipped limiter_storage_uri=redis://localhost:6379 # API uses redis as backend for rate limiting +grid_index_threshold=5000 # value in sqkm to apply grid index filter export_rate_limit=5 # no of requests per minute - default is 5 requests per minute ``` Based on your requirement you can also customize rawdata exports parameter using EXPORT_UPLOAD block diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 1f7a33e9..d1234fd6 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -20,7 +20,7 @@ import os import sys import threading -from src.galaxy.config import get_db_connection_params, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME, level, logger as logging, export_path, use_connection_pooling +from src.galaxy.config import get_db_connection_params, grid_index_threshold, AWS_ACCESS_KEY_ID, AWS_SECRET_ACCESS_KEY, BUCKET_NAME, level, logger as logging, export_path, use_connection_pooling from src.galaxy.validation.models import Source from psycopg2 import connect, sql from psycopg2.extras import DictCursor @@ -1035,7 +1035,7 @@ def get_grid_id(geom, cur): # generating geometry area in sqkm geom_area = int(area(json.loads(geom.json())) * 1E-6) # only apply grid in the logic if it exceeds the 5000 Sqkm - if geom_area > 5000: + if geom_area > grid_index_threshold: # this will be applied only when polygon gets bigger we will be slicing index size to search cur.execute( get_grid_id_query(geometry_dump)) diff --git a/src/galaxy/config.py b/src/galaxy/config.py index dff8f0ee..5af19c96 100644 --- a/src/galaxy/config.py +++ b/src/galaxy/config.py @@ -28,6 +28,7 @@ import os CONFIG_FILE_PATH = "src/config.txt" +use_s3_to_upload = False if os.path.exists(CONFIG_FILE_PATH) is False: raise FileNotFoundError(errno.ENOENT, os.strerror(errno.ENOENT), CONFIG_FILE_PATH) @@ -39,10 +40,13 @@ "API_CONFIG", "limiter_storage_uri", fallback="redis://localhost:6379" ) limiter = Limiter(key_func=get_remote_address, storage_uri=limiter_storage_uri) # rate limiter for API requests based on the remote ip address and redis as backend + export_rate_limit = int(config.get("API_CONFIG", "export_rate_limit", fallback=5)) + +grid_index_threshold = int(config.get("API_CONFIG", "grid_index_threshold", fallback=5000)) + # get log level from config log_level = config.get("API_CONFIG", "log_level", fallback=None) -use_s3_to_upload = False if log_level is None or log_level.lower() == 'debug': # default debug level = logging.DEBUG From fd5d5d08555910eba684f8429e1c767569e2c10d Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Fri, 30 Sep 2022 16:15:00 +0545 Subject: [PATCH 131/153] added sample data to docker postgres --- .gitignore | 1 + Dockerfile | 9 + docker-compose.yml | 24 +- docker-multiple-db.sh | 22 + 
populate-docker-db.sh | 6 + tests/src/fixtures/raw_data.sql | 15 +- tests/src/fixtures/tasking-manager.sql | 1503 ++++++++++++++++++++++++ 7 files changed, 1576 insertions(+), 4 deletions(-) create mode 100644 docker-multiple-db.sh create mode 100644 populate-docker-db.sh create mode 100644 tests/src/fixtures/tasking-manager.sql diff --git a/.gitignore b/.gitignore index 8af24106..1c238ea4 100644 --- a/.gitignore +++ b/.gitignore @@ -12,5 +12,6 @@ build newrelic.ini newrelic.ini_backup exports +postgres-data *.out *.log diff --git a/Dockerfile b/Dockerfile index fb09fdd4..cd6b7bc0 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,6 +8,15 @@ RUN apt-get update && apt-get -y upgrade && \ RUN mkdir /app COPY requirements.docker.txt /app/requirements.docker.txt +COPY populate-docker-db.sh /docker-entrypoint-initdb.d/ +COPY docker-multiple-db.sh /docker-entrypoint-initdb.d/ +COPY /tests/src/fixtures/insights.sql /insights.sql +COPY /tests/src/fixtures/mapathon_summary.sql /mapathon_summary.sql +COPY /tests/src/fixtures/raw_data.sql /raw_data.sql +COPY /tests/src/fixtures/underpass.sql /underpass.sql +COPY /tests/src/fixtures/tasking-manager.sql /tasking-manager.sql + + COPY setup.py /app/setup.py WORKDIR /app diff --git a/docker-compose.yml b/docker-compose.yml index 2e4a4e58..cf59a672 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -4,6 +4,7 @@ services: app: build: . + container_name: api command: uvicorn API.main:app --reload --host 0.0.0.0 --port 8000 --no-use-colors --proxy-headers ports: - 8000:8000 @@ -11,23 +12,44 @@ services: - .:/app depends_on: - redis + - postgres + + postgres: + image: postgis/postgis + container_name: pgsql + environment: + - POSTGRES_MULTIPLE_DATABASES="underpass","tm","raw","insights" + - POSTGRES_USER=postgres + - POSTGRES_PASSWORD=admin + ports: + - '5434:5432' + volumes: + - ./tests/src/fixtures/:/sql/ + - ./postgres-data:/var/lib/postgresql/data + - ./docker-multiple-db.sh:/docker-entrypoint-initdb.d/docker-multiple-db.sh + - ./populate-docker-db.sh:/docker-entrypoint-initdb.d/populate-docker-db.sh + worker: build: . - command: celery --app API.api_worker worker --loglevel=DEBUG + container_name: worker + command: celery --app API.api_worker worker --loglevel=INFO volumes: - .:/app depends_on: - app - redis + - postgres redis: image: redis:6-alpine + container_name: redis ports: - "6379:6379" worker-dashboard: build: . 
+ container_name: flower command: celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ ports: - 5550:5550 diff --git a/docker-multiple-db.sh b/docker-multiple-db.sh new file mode 100644 index 00000000..18c0f96b --- /dev/null +++ b/docker-multiple-db.sh @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e +set -u + +function create_user_and_database() { + local database=$1 + echo " Creating user and database '$database'" + psql -v ON_ERROR_STOP=1 --username "$POSTGRES_USER" <<-EOSQL + CREATE USER $database; + CREATE DATABASE $database; + GRANT ALL PRIVILEGES ON DATABASE $database TO $database; +EOSQL +} + +if [ -n "$POSTGRES_MULTIPLE_DATABASES" ]; then + echo "Multiple database creation requested: $POSTGRES_MULTIPLE_DATABASES" + for db in $(echo $POSTGRES_MULTIPLE_DATABASES | tr ',' ' '); do + create_user_and_database $db + done + echo "Multiple databases created" +fi \ No newline at end of file diff --git a/populate-docker-db.sh b/populate-docker-db.sh new file mode 100644 index 00000000..89aae5e0 --- /dev/null +++ b/populate-docker-db.sh @@ -0,0 +1,6 @@ + #!/bin/bash + psql -U postgres insights < /sql/insights.sql + psql -U postgres insights < /sql/mapathon_summary.sql + psql -U postgres raw < /sql/raw_data.sql + psql -U postgres underpass < /sql/underpass.sql + psql -U postgres tm < /sql/tasking-manager.sql \ No newline at end of file diff --git a/tests/src/fixtures/raw_data.sql b/tests/src/fixtures/raw_data.sql index 643738aa..14fc17c0 100644 --- a/tests/src/fixtures/raw_data.sql +++ b/tests/src/fixtures/raw_data.sql @@ -24,7 +24,7 @@ CREATE EXTENSION IF NOT EXISTS btree_gist WITH SCHEMA public; -- --- Name: EXTENSION btree_gist; Type: COMMENT; Schema: -; Owner: +-- Name: EXTENSION btree_gist; Type: COMMENT; Schema: -; Owner: -- COMMENT ON EXTENSION btree_gist IS 'support for indexing common datatypes in GiST'; @@ -38,7 +38,7 @@ CREATE EXTENSION IF NOT EXISTS hstore WITH SCHEMA public; -- --- Name: EXTENSION hstore; Type: COMMENT; Schema: -; Owner: +-- Name: EXTENSION hstore; Type: COMMENT; Schema: -; Owner: -- COMMENT ON EXTENSION hstore IS 'data type for storing sets of (key, value) pairs'; @@ -52,7 +52,7 @@ CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public; -- --- Name: EXTENSION postgis; Type: COMMENT; Schema: -; Owner: +-- Name: EXTENSION postgis; Type: COMMENT; Schema: -; Owner: -- COMMENT ON EXTENSION postgis IS 'PostGIS geometry and geography spatial types and functions'; @@ -173,6 +173,15 @@ CREATE UNLOGGED TABLE public.ways_poly ( ) WITH (autovacuum_enabled=off); +CREATE TABLE public.planet_osm_replication_status ( + url text NULL, + "sequence" int4 NULL, + importdate timestamptz NULL +); + +INSERT INTO public.planet_osm_replication_status (url,"sequence",importdate) VALUES + ('https://planet.openstreetmap.org/replication/minute',5000271,'2022-04-04 02:44:59+05:45'); + ALTER TABLE public.ways_poly OWNER TO postgres; diff --git a/tests/src/fixtures/tasking-manager.sql b/tests/src/fixtures/tasking-manager.sql new file mode 100644 index 00000000..15d98f61 --- /dev/null +++ b/tests/src/fixtures/tasking-manager.sql @@ -0,0 +1,1503 @@ +-- +-- PostgreSQL database dump +-- + +-- Dumped from database version 11.2 +-- Dumped by pg_dump version 11.2 + +SET statement_timeout = 0; +SET lock_timeout = 0; +SET idle_in_transaction_session_timeout = 0; +SET client_encoding = 'UTF8'; +SET standard_conforming_strings = on; +SELECT pg_catalog.set_config('search_path', '', false); +SET check_function_bodies = false; +SET client_min_messages = warning; +SET row_security = off; + 
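The compose setup above maps the fixture-loaded Postgres to host port 5434 with the postgres/admin credentials, creates the underpass, tm, raw and insights databases, and seeds them from tests/src/fixtures/ via populate-docker-db.sh. A quick way to confirm the seed worked, sketched under those assumptions, is to read back the replication-status row that raw_data.sql inserts:

```python
# Sanity check against the docker-compose Postgres defined above: connect on the
# mapped host port 5434 and read the row seeded into the "raw" database.
# Host, port, credentials and database name are taken from docker-compose.yml;
# adjust them if your compose file differs.
from psycopg2 import connect

with connect(host="localhost", port=5434, user="postgres",
             password="admin", dbname="raw") as conn:
    with conn.cursor() as cur:
        cur.execute("SELECT url, sequence, importdate FROM planet_osm_replication_status;")
        print(cur.fetchone())
```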
+-- +-- Name: topology; Type: SCHEMA; Schema: -; Owner: - +-- + +CREATE SCHEMA topology; + + +-- +-- Name: SCHEMA topology; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON SCHEMA topology IS 'PostGIS Topology schema'; + + +-- +-- Name: postgis; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS postgis WITH SCHEMA public; + + +-- +-- Name: EXTENSION postgis; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION postgis IS 'PostGIS geometry, geography, and raster spatial types and functions'; + + +-- +-- Name: postgis_topology; Type: EXTENSION; Schema: -; Owner: - +-- + +CREATE EXTENSION IF NOT EXISTS postgis_topology WITH SCHEMA topology; + + +-- +-- Name: EXTENSION postgis_topology; Type: COMMENT; Schema: -; Owner: - +-- + +COMMENT ON EXTENSION postgis_topology IS 'PostGIS topology spatial types and functions'; + + +SET default_tablespace = ''; + +SET default_with_oids = false; + +-- +-- Name: alembic_version; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.alembic_version ( + version_num character varying(32) NOT NULL +); + + +-- +-- Name: licenses; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.licenses ( + id integer NOT NULL, + name character varying, + description character varying, + plain_text character varying +); + + +-- +-- Name: licenses_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.licenses_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: licenses_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.licenses_id_seq OWNED BY public.licenses.id; + + +-- +-- Name: messages; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.messages ( + id integer NOT NULL, + message character varying, + subject character varying, + from_user_id bigint, + to_user_id bigint, + date timestamp without time zone, + read boolean, + message_type integer, + project_id integer, + task_id integer +); + + +-- +-- Name: messages_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.messages_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: messages_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.messages_id_seq OWNED BY public.messages.id; + + +-- +-- Name: priority_areas; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.priority_areas ( + id integer NOT NULL, + geometry public.geometry(Polygon,4326) +); + + +-- +-- Name: priority_areas_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.priority_areas_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: priority_areas_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.priority_areas_id_seq OWNED BY public.priority_areas.id; + + +-- +-- Name: project_allowed_users; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.project_allowed_users ( + project_id integer, + user_id bigint +); + + +-- +-- Name: project_chat; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.project_chat ( + id bigint NOT NULL, + project_id integer NOT NULL, + user_id integer NOT NULL, + time_stamp timestamp without time zone NOT NULL, + message character varying NOT NULL +); + + +-- +-- Name: project_chat_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE 
public.project_chat_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: project_chat_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.project_chat_id_seq OWNED BY public.project_chat.id; + + +-- +-- Name: project_info; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.project_info ( + project_id integer NOT NULL, + locale character varying(10) NOT NULL, + name character varying(512), + short_description character varying, + description character varying, + instructions character varying, + project_id_str character varying, + text_searchable tsvector, + per_task_instructions character varying +); + + +-- +-- Name: project_priority_areas; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.project_priority_areas ( + project_id integer, + priority_area_id integer +); + + +-- +-- Name: projects; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.projects ( + id integer NOT NULL, + status integer NOT NULL, + created timestamp without time zone NOT NULL, + priority integer, + default_locale character varying(10), + author_id bigint NOT NULL, + mapper_level integer NOT NULL, + enforce_mapper_level boolean, + enforce_validator_role boolean, + private boolean, + entities_to_map character varying, + changeset_comment character varying, + due_date timestamp without time zone, + imagery character varying, + josm_preset character varying, + last_updated timestamp without time zone, + mapping_types integer[], + organisation_tag character varying, + campaign_tag character varying, + total_tasks integer NOT NULL, + tasks_mapped integer NOT NULL, + tasks_validated integer NOT NULL, + tasks_bad_imagery integer NOT NULL, + license_id integer, + centroid public.geometry(Point,4326), + geometry public.geometry(MultiPolygon,4326), + task_creation_mode integer +); + + +-- +-- Name: projects_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.projects_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: projects_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.projects_id_seq OWNED BY public.projects.id; + + +-- +-- Name: tags; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.tags ( + id integer NOT NULL, + organisations character varying, + campaigns character varying +); + + +-- +-- Name: tags_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.tags_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: tags_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.tags_id_seq OWNED BY public.tags.id; + + +-- +-- Name: task_history; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.task_history ( + id integer NOT NULL, + project_id integer, + task_id integer NOT NULL, + action character varying NOT NULL, + action_text character varying, + action_date timestamp without time zone NOT NULL, + user_id bigint NOT NULL +); + + +-- +-- Name: task_history_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.task_history_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: task_history_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.task_history_id_seq OWNED BY public.task_history.id; + + +-- +-- Name: 
task_invalidation_history; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.task_invalidation_history ( + id integer NOT NULL, + project_id integer NOT NULL, + task_id integer NOT NULL, + is_closed boolean, + mapper_id bigint, + mapped_date timestamp without time zone, + invalidator_id bigint, + invalidated_date timestamp without time zone, + invalidation_history_id integer, + validator_id bigint, + validated_date timestamp without time zone, + updated_date timestamp without time zone +); + + +-- +-- Name: task_invalidation_history_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.task_invalidation_history_id_seq + AS integer + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: task_invalidation_history_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.task_invalidation_history_id_seq OWNED BY public.task_invalidation_history.id; + + +-- +-- Name: tasks; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.tasks ( + id integer NOT NULL, + project_id integer NOT NULL, + x integer, + y integer, + zoom integer, + geometry public.geometry(MultiPolygon,4326), + task_status integer, + locked_by bigint, + mapped_by bigint, + validated_by bigint, + is_square boolean, + extra_properties character varying +); + + +-- +-- Name: users; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.users ( + id bigint NOT NULL, + username character varying, + role integer NOT NULL, + mapping_level integer NOT NULL, + tasks_mapped integer NOT NULL, + tasks_validated integer NOT NULL, + tasks_invalidated integer NOT NULL, + projects_mapped integer[], + email_address character varying, + facebook_id character varying, + is_email_verified boolean, + linkedin_id character varying, + twitter_id character varying, + date_registered timestamp without time zone, + last_validation_date timestamp without time zone, + validation_message boolean DEFAULT true NOT NULL, + is_expert boolean +); + + +-- +-- Name: users_id_seq; Type: SEQUENCE; Schema: public; Owner: - +-- + +CREATE SEQUENCE public.users_id_seq + START WITH 1 + INCREMENT BY 1 + NO MINVALUE + NO MAXVALUE + CACHE 1; + + +-- +-- Name: users_id_seq; Type: SEQUENCE OWNED BY; Schema: public; Owner: - +-- + +ALTER SEQUENCE public.users_id_seq OWNED BY public.users.id; + + +-- +-- Name: users_licenses; Type: TABLE; Schema: public; Owner: - +-- + +CREATE TABLE public.users_licenses ( + "user" bigint, + license integer +); + + +-- +-- Name: licenses id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.licenses ALTER COLUMN id SET DEFAULT nextval('public.licenses_id_seq'::regclass); + + +-- +-- Name: messages id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.messages ALTER COLUMN id SET DEFAULT nextval('public.messages_id_seq'::regclass); + + +-- +-- Name: priority_areas id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.priority_areas ALTER COLUMN id SET DEFAULT nextval('public.priority_areas_id_seq'::regclass); + + +-- +-- Name: project_chat id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_chat ALTER COLUMN id SET DEFAULT nextval('public.project_chat_id_seq'::regclass); + + +-- +-- Name: projects id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.projects ALTER COLUMN id SET DEFAULT nextval('public.projects_id_seq'::regclass); + + +-- +-- Name: tags id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER 
TABLE ONLY public.tags ALTER COLUMN id SET DEFAULT nextval('public.tags_id_seq'::regclass); + + +-- +-- Name: task_history id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_history ALTER COLUMN id SET DEFAULT nextval('public.task_history_id_seq'::regclass); + + +-- +-- Name: task_invalidation_history id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history ALTER COLUMN id SET DEFAULT nextval('public.task_invalidation_history_id_seq'::regclass); + + +-- +-- Name: users id; Type: DEFAULT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.users ALTER COLUMN id SET DEFAULT nextval('public.users_id_seq'::regclass); + + +-- +-- Data for Name: alembic_version; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.alembic_version (version_num) FROM stdin; +0a6b82b55983 +\. + + +-- +-- Data for Name: licenses; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.licenses (id, name, description, plain_text) FROM stdin; +\. + + +-- +-- Data for Name: messages; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.messages (id, message, subject, from_user_id, to_user_id, date, read, message_type, project_id, task_id) FROM stdin; +2 Hi ramyaragupathy,
\n
\nWelcome to the HOT Tasking Manager, we hope you will enjoy being part of the community that is helping map the world.
\n
\nIf you would like to be alerted to project updates and feedback on your mapping, please add your email address to your profile by clicking on the link below.
\n
\nUpdate your profile here
\n
\nThank you very much!
\n
\nOn behalf of the Humanitarian OpenStreetMap Team volunteer and staff community we want to welcome you to humanitarian mapping and the wider OpenStreetMap community.
\n
\nFor a much more detailed welcome letter, please visit the OSM Wiki Tasking Manager Welcome page. It has links to great learning resources if you want to learn more right away!\n Welcome to the HOT Tasking Manager \N 2823295 2019-04-09 03:11:54.74308 t 1 \N \N +\. + + +-- +-- Data for Name: priority_areas; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.priority_areas (id, geometry) FROM stdin; +\. + + +-- +-- Data for Name: project_allowed_users; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.project_allowed_users (project_id, user_id) FROM stdin; +\. + + +-- +-- Data for Name: project_chat; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.project_chat (id, project_id, user_id, time_stamp, message) FROM stdin; +\. + + +-- +-- Data for Name: project_info; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.project_info (project_id, locale, name, short_description, description, instructions, project_id_str, text_searchable, per_task_instructions) FROM stdin; +1 en testing ssa testing ssa ahsjhdjshfjsh jhsjdh fsjhdfjs jsfhd jshjf hsjdhf sjhfjsh jshfjsdh jshjf hsjhf jsdhfjs hfjshf jsdhfj sdhfjh jsdhfjshfj shjf hsdj testing ssa ahsjhdjshfjsh jhsjdh fsjhdfjs jsfhd jshjf hsjdhf sjhfjsh jshfjsdh jshjf hsjhf jsdhfjs hfjshf jsdhfj sdhfjh jsdhfjshfj shjf hsdj testing ssa ahsjhdjshfjsh jhsjdh fsjhdfjs jsfhd jshjf hsjdhf sjhfjsh jshfjsdh jshjf hsjhf jsdhfjs hfjshf jsdhfj sdhfjh jsdhfjshfj shjf hsdj 1 \N +2 en arbitrary-project tests tsete tests tsete tests tsete 2 \N +3 en arbitrary-1 arbitrary- test split arbitrary- test split arbitrary- test split 3 \N +\. + + +-- +-- Data for Name: project_priority_areas; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.project_priority_areas (project_id, priority_area_id) FROM stdin; +\. 
+ + +-- +-- Data for Name: projects; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.projects (id, status, created, priority, default_locale, author_id, mapper_level, enforce_mapper_level, enforce_validator_role, private, entities_to_map, changeset_comment, due_date, imagery, josm_preset, last_updated, mapping_types, organisation_tag, campaign_tag, total_tasks, tasks_mapped, tasks_validated, tasks_bad_imagery, license_id, centroid, geometry, task_creation_mode) FROM stdin; +2 1 2019-04-08 12:21:37.425808 1 en 360183 1 f f f \N #hotosm-project-2 \N \N \N 2019-04-09 03:34:52.985812 {} \N \N 16 1 0 1 \N 0101000020E61000004B5FA2C5E5DC3240E03320AF4AF740C0 0106000020E610000001000000010300000001000000050000000100405B40D53240CDE033A57FF540C0010040B6F2D6324010F53EC585F940C00000C02390E23240EDEACD35A0F840C0000080ADC9E63240EBCFC9CBFBF540C00100405B40D53240CDE033A57FF540C0 1 +1 1 2019-04-08 10:54:25.449637 1 en 360183 1 f f f \N #tm-project-1 \N \N \N 2019-04-08 11:40:37.248906 {} \N \N 171 1 1 2 \N 0101000020E61000005A0601152A3543C05A9C94D060CD29C0 0106000020E6100000010000000103000000010000000E00000015A922BEFA3943C0A897377D739529C015A922BE421F43C053173DCE36BD29C015A922BE522D43C0883A6048D8EC29C016A922BE9B2F43C0F948A4C9BBE829C014A9223E5A3C43C0D647DECB9E092AC014A9223ECA4443C05FC6E2FB38072AC015A922BED24343C0988DCFC461F429C015A922BE024143C0B194E08B47E429C015A922BE4B4343C0387210A150D929C016A922BEC73F43C0D1675BBE01CE29C015A922BE7E3D43C0FAB7CBCCD8AF29C016A9223EEC4043C06AD15D4AFE9C29C014A9223E924043C001D02AE3649329C015A922BEFA3943C0A897377D739529C0 0 +3 1 2019-04-08 12:23:34.568831 2 en 360183 1 f f f \N #hotosm-project-3 \N \N \N 2019-04-08 12:24:51.647033 {} \N \N 1 0 0 0 \N 0101000020E61000004B5FA2C5E5DC3240E03320AF4AF740C0 0106000020E610000001000000010300000001000000050000000100405B40D53240CDE033A57FF540C0010040B6F2D6324010F53EC585F940C00000C02390E23240EDEACD35A0F840C0000080ADC9E63240EBCFC9CBFBF540C00100405B40D53240CDE033A57FF540C0 1 +\. + + +-- +-- Data for Name: spatial_ref_sys; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.spatial_ref_sys (srid, auth_name, auth_srid, srtext, proj4text) FROM stdin; +\. + + +-- +-- Data for Name: tags; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.tags (id, organisations, campaigns) FROM stdin; +\. 
+ + +-- +-- Data for Name: task_history; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.task_history (id, project_id, task_id, action, action_text, action_date, user_id) FROM stdin; +3 1 169 STATE_CHANGE SPLIT 2019-04-08 10:56:41.164122 360183 +4 1 169 STATE_CHANGE READY 2019-04-08 10:56:41.165376 360183 +6 1 170 STATE_CHANGE SPLIT 2019-04-08 10:56:41.295043 360183 +7 1 170 STATE_CHANGE READY 2019-04-08 10:56:41.29674 360183 +9 1 171 STATE_CHANGE SPLIT 2019-04-08 10:56:41.420013 360183 +10 1 171 STATE_CHANGE READY 2019-04-08 10:56:41.421315 360183 +12 1 172 STATE_CHANGE SPLIT 2019-04-08 10:56:41.596023 360183 +13 1 172 STATE_CHANGE READY 2019-04-08 10:56:41.597381 360183 +15 1 172 STATE_CHANGE BADIMAGERY 2019-04-08 10:56:46.913498 360183 +14 1 172 LOCKED_FOR_MAPPING 00:00:01.205755 2019-04-08 10:56:45.710522 360183 +17 1 170 STATE_CHANGE MAPPED 2019-04-08 10:56:57.744843 360183 +16 1 170 LOCKED_FOR_MAPPING 00:00:06.556031 2019-04-08 10:56:51.192412 360183 +18 1 169 LOCKED_FOR_MAPPING 00:00:02.350503 2019-04-08 10:57:01.171814 360183 +20 1 129 STATE_CHANGE BADIMAGERY 2019-04-08 11:35:52.948456 360183 +19 1 129 LOCKED_FOR_MAPPING 00:00:03.338244 2019-04-08 11:35:49.613867 360183 +22 1 170 STATE_CHANGE VALIDATED 2019-04-08 11:40:37.256744 360183 +21 1 170 LOCKED_FOR_VALIDATION 00:00:03.416170 2019-04-08 11:40:33.85086 360183 +38 2 6 STATE_CHANGE READY 2019-04-08 12:25:14.405231 360183 +39 2 6 STATE_CHANGE SPLIT 2019-04-08 12:25:14.403753 360183 +40 2 6 STATE_CHANGE SPLIT 2019-04-08 12:25:24.973398 360183 +41 2 6 STATE_CHANGE READY 2019-04-08 12:25:24.97435 360183 +43 2 7 STATE_CHANGE READY 2019-04-08 12:25:14.405231 360183 +44 2 7 STATE_CHANGE SPLIT 2019-04-08 12:25:14.403753 360183 +45 2 7 STATE_CHANGE SPLIT 2019-04-08 12:25:25.098002 360183 +46 2 7 STATE_CHANGE READY 2019-04-08 12:25:25.098977 360183 +48 2 8 STATE_CHANGE READY 2019-04-08 12:25:14.405231 360183 +49 2 8 STATE_CHANGE SPLIT 2019-04-08 12:25:14.403753 360183 +50 2 8 STATE_CHANGE SPLIT 2019-04-08 12:25:25.761684 360183 +51 2 8 STATE_CHANGE READY 2019-04-08 12:25:25.76252 360183 +53 2 9 STATE_CHANGE READY 2019-04-08 12:25:14.405231 360183 +54 2 9 STATE_CHANGE SPLIT 2019-04-08 12:25:14.403753 360183 +55 2 9 STATE_CHANGE SPLIT 2019-04-08 12:25:25.896845 360183 +56 2 9 STATE_CHANGE READY 2019-04-08 12:25:25.897859 360183 +59 2 10 STATE_CHANGE READY 2019-04-08 12:25:14.871701 360183 +60 2 10 STATE_CHANGE SPLIT 2019-04-08 12:25:14.870823 360183 +61 2 10 STATE_CHANGE SPLIT 2019-04-08 12:25:31.435842 360183 +62 2 10 STATE_CHANGE READY 2019-04-08 12:25:31.437325 360183 +64 2 11 STATE_CHANGE READY 2019-04-08 12:25:14.871701 360183 +65 2 11 STATE_CHANGE SPLIT 2019-04-08 12:25:14.870823 360183 +66 2 11 STATE_CHANGE SPLIT 2019-04-08 12:25:31.565856 360183 +67 2 11 STATE_CHANGE READY 2019-04-08 12:25:31.566573 360183 +69 2 12 STATE_CHANGE READY 2019-04-08 12:25:14.871701 360183 +70 2 12 STATE_CHANGE SPLIT 2019-04-08 12:25:14.870823 360183 +71 2 12 STATE_CHANGE SPLIT 2019-04-08 12:25:31.700419 360183 +72 2 12 STATE_CHANGE READY 2019-04-08 12:25:31.701984 360183 +74 2 13 STATE_CHANGE READY 2019-04-08 12:25:14.871701 360183 +75 2 13 STATE_CHANGE SPLIT 2019-04-08 12:25:14.870823 360183 +76 2 13 STATE_CHANGE SPLIT 2019-04-08 12:25:31.823686 360183 +77 2 13 STATE_CHANGE READY 2019-04-08 12:25:31.82449 360183 +80 2 14 STATE_CHANGE READY 2019-04-08 12:25:14.269767 360183 +81 2 14 STATE_CHANGE SPLIT 2019-04-08 12:25:14.268203 360183 +82 2 14 STATE_CHANGE SPLIT 2019-04-08 12:25:39.346795 360183 +83 2 14 STATE_CHANGE READY 2019-04-08 
12:25:39.348197 360183 +85 2 15 STATE_CHANGE READY 2019-04-08 12:25:14.269767 360183 +86 2 15 STATE_CHANGE SPLIT 2019-04-08 12:25:14.268203 360183 +87 2 15 STATE_CHANGE SPLIT 2019-04-08 12:25:39.491335 360183 +88 2 15 STATE_CHANGE READY 2019-04-08 12:25:39.492087 360183 +90 2 16 STATE_CHANGE READY 2019-04-08 12:25:14.269767 360183 +91 2 16 STATE_CHANGE SPLIT 2019-04-08 12:25:14.268203 360183 +92 2 16 STATE_CHANGE SPLIT 2019-04-08 12:25:39.622269 360183 +93 2 16 STATE_CHANGE READY 2019-04-08 12:25:39.623319 360183 +95 2 17 STATE_CHANGE READY 2019-04-08 12:25:14.269767 360183 +96 2 17 STATE_CHANGE SPLIT 2019-04-08 12:25:14.268203 360183 +97 2 17 STATE_CHANGE SPLIT 2019-04-08 12:25:39.748029 360183 +98 2 17 STATE_CHANGE READY 2019-04-08 12:25:39.749705 360183 +100 2 18 STATE_CHANGE SPLIT 2019-04-08 12:25:14.535811 360183 +101 2 18 STATE_CHANGE READY 2019-04-08 12:25:14.536724 360183 +103 2 18 STATE_CHANGE SPLIT 2019-04-08 12:25:45.646575 360183 +104 2 18 STATE_CHANGE READY 2019-04-08 12:25:45.647648 360183 +105 2 19 STATE_CHANGE SPLIT 2019-04-08 12:25:14.535811 360183 +106 2 19 STATE_CHANGE READY 2019-04-08 12:25:14.536724 360183 +108 2 19 STATE_CHANGE SPLIT 2019-04-08 12:25:45.778525 360183 +109 2 19 STATE_CHANGE READY 2019-04-08 12:25:45.779649 360183 +110 2 20 STATE_CHANGE SPLIT 2019-04-08 12:25:14.535811 360183 +111 2 20 STATE_CHANGE READY 2019-04-08 12:25:14.536724 360183 +113 2 20 STATE_CHANGE SPLIT 2019-04-08 12:25:45.90077 360183 +114 2 20 STATE_CHANGE READY 2019-04-08 12:25:45.901905 360183 +115 2 21 STATE_CHANGE SPLIT 2019-04-08 12:25:14.535811 360183 +116 2 21 STATE_CHANGE READY 2019-04-08 12:25:14.536724 360183 +118 2 21 STATE_CHANGE SPLIT 2019-04-08 12:25:46.025551 360183 +119 2 21 STATE_CHANGE READY 2019-04-08 12:25:46.026555 360183 +121 2 16 STATE_CHANGE BADIMAGERY 2019-04-09 03:27:47.072069 2823295 +120 2 16 LOCKED_FOR_MAPPING 00:00:03.433979 2019-04-09 03:27:43.64278 2823295 +123 2 9 STATE_CHANGE MAPPED 2019-04-09 03:34:52.996621 360183 +122 2 9 LOCKED_FOR_MAPPING 00:02:33.724149 2019-04-09 03:32:19.275216 360183 +\. + + +-- +-- Data for Name: task_invalidation_history; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.task_invalidation_history (id, project_id, task_id, is_closed, mapper_id, mapped_date, invalidator_id, invalidated_date, invalidation_history_id, validator_id, validated_date, updated_date) FROM stdin; +\. 
+ + +-- +-- Data for Name: tasks; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.tasks (id, project_id, x, y, zoom, geometry, task_status, locked_by, mapped_by, validated_by, is_square, extra_properties) FROM stdin; +1 1 6438 7594 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0E020D5674B0D2AC0BF2CF1FF1F4543C080B07A6455022AC0E82EF1FF4F4243C080B07A6455022AC0E82EF1FF4F4243C0E020D5674B0D2AC0BF2CF1FF1F4543C0E020D5674B0D2AC0 0 \N \N \N t \N +2 1 6438 7595 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C080B07A6455022AC0BF2CF1FF1F4543C0F0EB21235FF729C0E82EF1FF4F4243C0F0EB21235FF729C0E82EF1FF4F4243C080B07A6455022AC0BF2CF1FF1F4543C080B07A6455022AC0 0 \N \N \N t \N +3 1 6438 7596 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0F0EB21235FF729C0BF2CF1FF1F4543C05023E3A368EC29C0E82EF1FF4F4243C05023E3A368EC29C0E82EF1FF4F4243C0F0EB21235FF729C0BF2CF1FF1F4543C0F0EB21235FF729C0 0 \N \N \N t \N +4 1 6438 7597 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C05023E3A368EC29C0BF2CF1FF1F4543C060A9D6E671E129C0E82EF1FF4F4243C060A9D6E671E129C0E82EF1FF4F4243C05023E3A368EC29C0BF2CF1FF1F4543C05023E3A368EC29C0 0 \N \N \N t \N +5 1 6438 7598 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C060A9D6E671E129C0BF2CF1FF1F4543C0D0D314EC7AD629C0E82EF1FF4F4243C0D0D314EC7AD629C0E82EF1FF4F4243C060A9D6E671E129C0BF2CF1FF1F4543C060A9D6E671E129C0 0 \N \N \N t \N +6 1 6438 7599 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0D0D314EC7AD629C0BF2CF1FF1F4543C008FBB5B383CB29C0E82EF1FF4F4243C008FBB5B383CB29C0E82EF1FF4F4243C0D0D314EC7AD629C0BF2CF1FF1F4543C0D0D314EC7AD629C0 0 \N \N \N t \N +7 1 6438 7600 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C008FBB5B383CB29C0BF2CF1FF1F4543C0507AD23D8CC029C0E82EF1FF4F4243C0507AD23D8CC029C0E82EF1FF4F4243C008FBB5B383CB29C0BF2CF1FF1F4543C008FBB5B383CB29C0 0 \N \N \N t \N +8 1 6438 7601 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0507AD23D8CC029C0BF2CF1FF1F4543C0A0AF828A94B529C0E82EF1FF4F4243C0A0AF828A94B529C0E82EF1FF4F4243C0507AD23D8CC029C0BF2CF1FF1F4543C0507AD23D8CC029C0 0 \N \N \N t \N +9 1 6438 7602 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0A0AF828A94B529C0BF2CF1FF1F4543C0C8FBDE999CAA29C0E82EF1FF4F4243C0C8FBDE999CAA29C0E82EF1FF4F4243C0A0AF828A94B529C0BF2CF1FF1F4543C0A0AF828A94B529C0 0 \N \N \N t \N +10 1 6438 7603 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0C8FBDE999CAA29C0BF2CF1FF1F4543C070C2FF6BA49F29C0E82EF1FF4F4243C070C2FF6BA49F29C0E82EF1FF4F4243C0C8FBDE999CAA29C0BF2CF1FF1F4543C0C8FBDE999CAA29C0 0 \N \N \N t \N +11 1 6438 7604 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C070C2FF6BA49F29C0BF2CF1FF1F4543C0D869FD00AC9429C0E82EF1FF4F4243C0D869FD00AC9429C0E82EF1FF4F4243C070C2FF6BA49F29C0BF2CF1FF1F4543C070C2FF6BA49F29C0 0 \N \N \N t \N +12 1 6438 7605 14 0106000020E61000000100000001030000000100000005000000BF2CF1FF1F4543C0D869FD00AC9429C0BF2CF1FF1F4543C0205BF058B38929C0E82EF1FF4F4243C0205BF058B38929C0E82EF1FF4F4243C0D869FD00AC9429C0BF2CF1FF1F4543C0D869FD00AC9429C0 0 \N \N \N t \N +13 1 6439 7594 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0E020D5674B0D2AC0E82EF1FF4F4243C080B07A6455022AC01131F1FF7F3F43C080B07A6455022AC01131F1FF7F3F43C0E020D5674B0D2AC0E82EF1FF4F4243C0E020D5674B0D2AC0 0 \N \N \N t \N +14 1 6439 7595 14 
0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C080B07A6455022AC0E82EF1FF4F4243C0F0EB21235FF729C01131F1FF7F3F43C0F0EB21235FF729C01131F1FF7F3F43C080B07A6455022AC0E82EF1FF4F4243C080B07A6455022AC0 0 \N \N \N t \N +15 1 6439 7596 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0F0EB21235FF729C0E82EF1FF4F4243C05023E3A368EC29C01131F1FF7F3F43C05023E3A368EC29C01131F1FF7F3F43C0F0EB21235FF729C0E82EF1FF4F4243C0F0EB21235FF729C0 0 \N \N \N t \N +16 1 6439 7597 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C05023E3A368EC29C0E82EF1FF4F4243C060A9D6E671E129C01131F1FF7F3F43C060A9D6E671E129C01131F1FF7F3F43C05023E3A368EC29C0E82EF1FF4F4243C05023E3A368EC29C0 0 \N \N \N t \N +17 1 6439 7598 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C060A9D6E671E129C0E82EF1FF4F4243C0D0D314EC7AD629C01131F1FF7F3F43C0D0D314EC7AD629C01131F1FF7F3F43C060A9D6E671E129C0E82EF1FF4F4243C060A9D6E671E129C0 0 \N \N \N t \N +18 1 6439 7599 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0D0D314EC7AD629C0E82EF1FF4F4243C008FBB5B383CB29C01131F1FF7F3F43C008FBB5B383CB29C01131F1FF7F3F43C0D0D314EC7AD629C0E82EF1FF4F4243C0D0D314EC7AD629C0 0 \N \N \N t \N +19 1 6439 7600 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C008FBB5B383CB29C0E82EF1FF4F4243C0507AD23D8CC029C01131F1FF7F3F43C0507AD23D8CC029C01131F1FF7F3F43C008FBB5B383CB29C0E82EF1FF4F4243C008FBB5B383CB29C0 0 \N \N \N t \N +20 1 6439 7601 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0507AD23D8CC029C0E82EF1FF4F4243C0A0AF828A94B529C01131F1FF7F3F43C0A0AF828A94B529C01131F1FF7F3F43C0507AD23D8CC029C0E82EF1FF4F4243C0507AD23D8CC029C0 0 \N \N \N t \N +21 1 6439 7602 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0A0AF828A94B529C0E82EF1FF4F4243C0C8FBDE999CAA29C01131F1FF7F3F43C0C8FBDE999CAA29C01131F1FF7F3F43C0A0AF828A94B529C0E82EF1FF4F4243C0A0AF828A94B529C0 0 \N \N \N t \N +22 1 6439 7603 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0C8FBDE999CAA29C0E82EF1FF4F4243C070C2FF6BA49F29C01131F1FF7F3F43C070C2FF6BA49F29C01131F1FF7F3F43C0C8FBDE999CAA29C0E82EF1FF4F4243C0C8FBDE999CAA29C0 0 \N \N \N t \N +23 1 6439 7604 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C070C2FF6BA49F29C0E82EF1FF4F4243C0D869FD00AC9429C01131F1FF7F3F43C0D869FD00AC9429C01131F1FF7F3F43C070C2FF6BA49F29C0E82EF1FF4F4243C070C2FF6BA49F29C0 0 \N \N \N t \N +24 1 6439 7605 14 0106000020E61000000100000001030000000100000005000000E82EF1FF4F4243C0D869FD00AC9429C0E82EF1FF4F4243C0205BF058B38929C01131F1FF7F3F43C0205BF058B38929C01131F1FF7F3F43C0D869FD00AC9429C0E82EF1FF4F4243C0D869FD00AC9429C0 0 \N \N \N t \N +25 1 6440 7594 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0E020D5674B0D2AC01131F1FF7F3F43C080B07A6455022AC03D33F1FFAF3C43C080B07A6455022AC03D33F1FFAF3C43C0E020D5674B0D2AC01131F1FF7F3F43C0E020D5674B0D2AC0 0 \N \N \N t \N +26 1 6440 7595 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C080B07A6455022AC01131F1FF7F3F43C0F0EB21235FF729C03D33F1FFAF3C43C0F0EB21235FF729C03D33F1FFAF3C43C080B07A6455022AC01131F1FF7F3F43C080B07A6455022AC0 0 \N \N \N t \N +27 1 6440 7596 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0F0EB21235FF729C01131F1FF7F3F43C05023E3A368EC29C03D33F1FFAF3C43C05023E3A368EC29C03D33F1FFAF3C43C0F0EB21235FF729C01131F1FF7F3F43C0F0EB21235FF729C0 0 \N \N \N t \N +28 1 6440 7597 14 
0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C05023E3A368EC29C01131F1FF7F3F43C060A9D6E671E129C03D33F1FFAF3C43C060A9D6E671E129C03D33F1FFAF3C43C05023E3A368EC29C01131F1FF7F3F43C05023E3A368EC29C0 0 \N \N \N t \N +29 1 6440 7598 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C060A9D6E671E129C01131F1FF7F3F43C0D0D314EC7AD629C03D33F1FFAF3C43C0D0D314EC7AD629C03D33F1FFAF3C43C060A9D6E671E129C01131F1FF7F3F43C060A9D6E671E129C0 0 \N \N \N t \N +30 1 6440 7599 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0D0D314EC7AD629C01131F1FF7F3F43C008FBB5B383CB29C03D33F1FFAF3C43C008FBB5B383CB29C03D33F1FFAF3C43C0D0D314EC7AD629C01131F1FF7F3F43C0D0D314EC7AD629C0 0 \N \N \N t \N +31 1 6440 7600 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C008FBB5B383CB29C01131F1FF7F3F43C0507AD23D8CC029C03D33F1FFAF3C43C0507AD23D8CC029C03D33F1FFAF3C43C008FBB5B383CB29C01131F1FF7F3F43C008FBB5B383CB29C0 0 \N \N \N t \N +32 1 6440 7601 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0507AD23D8CC029C01131F1FF7F3F43C0A0AF828A94B529C03D33F1FFAF3C43C0A0AF828A94B529C03D33F1FFAF3C43C0507AD23D8CC029C01131F1FF7F3F43C0507AD23D8CC029C0 0 \N \N \N t \N +33 1 6440 7602 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0A0AF828A94B529C01131F1FF7F3F43C0C8FBDE999CAA29C03D33F1FFAF3C43C0C8FBDE999CAA29C03D33F1FFAF3C43C0A0AF828A94B529C01131F1FF7F3F43C0A0AF828A94B529C0 0 \N \N \N t \N +34 1 6440 7603 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0C8FBDE999CAA29C01131F1FF7F3F43C070C2FF6BA49F29C03D33F1FFAF3C43C070C2FF6BA49F29C03D33F1FFAF3C43C0C8FBDE999CAA29C01131F1FF7F3F43C0C8FBDE999CAA29C0 0 \N \N \N t \N +35 1 6440 7604 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C070C2FF6BA49F29C01131F1FF7F3F43C0D869FD00AC9429C03D33F1FFAF3C43C0D869FD00AC9429C03D33F1FFAF3C43C070C2FF6BA49F29C01131F1FF7F3F43C070C2FF6BA49F29C0 0 \N \N \N t \N +36 1 6440 7605 14 0106000020E610000001000000010300000001000000050000001131F1FF7F3F43C0D869FD00AC9429C01131F1FF7F3F43C0205BF058B38929C03D33F1FFAF3C43C0205BF058B38929C03D33F1FFAF3C43C0D869FD00AC9429C01131F1FF7F3F43C0D869FD00AC9429C0 0 \N \N \N t \N +37 1 6441 7594 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0E020D5674B0D2AC03D33F1FFAF3C43C080B07A6455022AC06735F1FFDF3943C080B07A6455022AC06735F1FFDF3943C0E020D5674B0D2AC03D33F1FFAF3C43C0E020D5674B0D2AC0 0 \N \N \N t \N +38 1 6441 7595 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C080B07A6455022AC03D33F1FFAF3C43C0F0EB21235FF729C06735F1FFDF3943C0F0EB21235FF729C06735F1FFDF3943C080B07A6455022AC03D33F1FFAF3C43C080B07A6455022AC0 0 \N \N \N t \N +39 1 6441 7596 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0F0EB21235FF729C03D33F1FFAF3C43C05023E3A368EC29C06735F1FFDF3943C05023E3A368EC29C06735F1FFDF3943C0F0EB21235FF729C03D33F1FFAF3C43C0F0EB21235FF729C0 0 \N \N \N t \N +40 1 6441 7597 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C05023E3A368EC29C03D33F1FFAF3C43C060A9D6E671E129C06735F1FFDF3943C060A9D6E671E129C06735F1FFDF3943C05023E3A368EC29C03D33F1FFAF3C43C05023E3A368EC29C0 0 \N \N \N t \N +41 1 6441 7598 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C060A9D6E671E129C03D33F1FFAF3C43C0D0D314EC7AD629C06735F1FFDF3943C0D0D314EC7AD629C06735F1FFDF3943C060A9D6E671E129C03D33F1FFAF3C43C060A9D6E671E129C0 0 \N \N \N t \N +42 1 6441 7599 14 
0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0D0D314EC7AD629C03D33F1FFAF3C43C008FBB5B383CB29C06735F1FFDF3943C008FBB5B383CB29C06735F1FFDF3943C0D0D314EC7AD629C03D33F1FFAF3C43C0D0D314EC7AD629C0 0 \N \N \N t \N +43 1 6441 7600 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C008FBB5B383CB29C03D33F1FFAF3C43C0507AD23D8CC029C06735F1FFDF3943C0507AD23D8CC029C06735F1FFDF3943C008FBB5B383CB29C03D33F1FFAF3C43C008FBB5B383CB29C0 0 \N \N \N t \N +44 1 6441 7601 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0507AD23D8CC029C03D33F1FFAF3C43C0A0AF828A94B529C06735F1FFDF3943C0A0AF828A94B529C06735F1FFDF3943C0507AD23D8CC029C03D33F1FFAF3C43C0507AD23D8CC029C0 0 \N \N \N t \N +45 1 6441 7602 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0A0AF828A94B529C03D33F1FFAF3C43C0C8FBDE999CAA29C06735F1FFDF3943C0C8FBDE999CAA29C06735F1FFDF3943C0A0AF828A94B529C03D33F1FFAF3C43C0A0AF828A94B529C0 0 \N \N \N t \N +46 1 6441 7603 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0C8FBDE999CAA29C03D33F1FFAF3C43C070C2FF6BA49F29C06735F1FFDF3943C070C2FF6BA49F29C06735F1FFDF3943C0C8FBDE999CAA29C03D33F1FFAF3C43C0C8FBDE999CAA29C0 0 \N \N \N t \N +47 1 6441 7604 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C070C2FF6BA49F29C03D33F1FFAF3C43C0D869FD00AC9429C06735F1FFDF3943C0D869FD00AC9429C06735F1FFDF3943C070C2FF6BA49F29C03D33F1FFAF3C43C070C2FF6BA49F29C0 0 \N \N \N t \N +48 1 6441 7605 14 0106000020E610000001000000010300000001000000050000003D33F1FFAF3C43C0D869FD00AC9429C03D33F1FFAF3C43C0205BF058B38929C06735F1FFDF3943C0205BF058B38929C06735F1FFDF3943C0D869FD00AC9429C03D33F1FFAF3C43C0D869FD00AC9429C0 0 \N \N \N t \N +49 1 6442 7594 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0E020D5674B0D2AC06735F1FFDF3943C080B07A6455022AC09037F1FF0F3743C080B07A6455022AC09037F1FF0F3743C0E020D5674B0D2AC06735F1FFDF3943C0E020D5674B0D2AC0 0 \N \N \N t \N +50 1 6442 7595 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C080B07A6455022AC06735F1FFDF3943C0F0EB21235FF729C09037F1FF0F3743C0F0EB21235FF729C09037F1FF0F3743C080B07A6455022AC06735F1FFDF3943C080B07A6455022AC0 0 \N \N \N t \N +51 1 6442 7596 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0F0EB21235FF729C06735F1FFDF3943C05023E3A368EC29C09037F1FF0F3743C05023E3A368EC29C09037F1FF0F3743C0F0EB21235FF729C06735F1FFDF3943C0F0EB21235FF729C0 0 \N \N \N t \N +52 1 6442 7597 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C05023E3A368EC29C06735F1FFDF3943C060A9D6E671E129C09037F1FF0F3743C060A9D6E671E129C09037F1FF0F3743C05023E3A368EC29C06735F1FFDF3943C05023E3A368EC29C0 0 \N \N \N t \N +53 1 6442 7598 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C060A9D6E671E129C06735F1FFDF3943C0D0D314EC7AD629C09037F1FF0F3743C0D0D314EC7AD629C09037F1FF0F3743C060A9D6E671E129C06735F1FFDF3943C060A9D6E671E129C0 0 \N \N \N t \N +54 1 6442 7599 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0D0D314EC7AD629C06735F1FFDF3943C008FBB5B383CB29C09037F1FF0F3743C008FBB5B383CB29C09037F1FF0F3743C0D0D314EC7AD629C06735F1FFDF3943C0D0D314EC7AD629C0 0 \N \N \N t \N +55 1 6442 7600 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C008FBB5B383CB29C06735F1FFDF3943C0507AD23D8CC029C09037F1FF0F3743C0507AD23D8CC029C09037F1FF0F3743C008FBB5B383CB29C06735F1FFDF3943C008FBB5B383CB29C0 0 \N \N \N t \N +56 1 6442 7601 14 
0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0507AD23D8CC029C06735F1FFDF3943C0A0AF828A94B529C09037F1FF0F3743C0A0AF828A94B529C09037F1FF0F3743C0507AD23D8CC029C06735F1FFDF3943C0507AD23D8CC029C0 0 \N \N \N t \N +57 1 6442 7602 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0A0AF828A94B529C06735F1FFDF3943C0C8FBDE999CAA29C09037F1FF0F3743C0C8FBDE999CAA29C09037F1FF0F3743C0A0AF828A94B529C06735F1FFDF3943C0A0AF828A94B529C0 0 \N \N \N t \N +58 1 6442 7603 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0C8FBDE999CAA29C06735F1FFDF3943C070C2FF6BA49F29C09037F1FF0F3743C070C2FF6BA49F29C09037F1FF0F3743C0C8FBDE999CAA29C06735F1FFDF3943C0C8FBDE999CAA29C0 0 \N \N \N t \N +59 1 6442 7604 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C070C2FF6BA49F29C06735F1FFDF3943C0D869FD00AC9429C09037F1FF0F3743C0D869FD00AC9429C09037F1FF0F3743C070C2FF6BA49F29C06735F1FFDF3943C070C2FF6BA49F29C0 0 \N \N \N t \N +60 1 6442 7605 14 0106000020E610000001000000010300000001000000050000006735F1FFDF3943C0D869FD00AC9429C06735F1FFDF3943C0205BF058B38929C09037F1FF0F3743C0205BF058B38929C09037F1FF0F3743C0D869FD00AC9429C06735F1FFDF3943C0D869FD00AC9429C0 0 \N \N \N t \N +61 1 6443 7594 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0E020D5674B0D2AC09037F1FF0F3743C080B07A6455022AC0B939F1FF3F3443C080B07A6455022AC0B939F1FF3F3443C0E020D5674B0D2AC09037F1FF0F3743C0E020D5674B0D2AC0 0 \N \N \N t \N +62 1 6443 7595 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C080B07A6455022AC09037F1FF0F3743C0F0EB21235FF729C0B939F1FF3F3443C0F0EB21235FF729C0B939F1FF3F3443C080B07A6455022AC09037F1FF0F3743C080B07A6455022AC0 0 \N \N \N t \N +63 1 6443 7596 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0F0EB21235FF729C09037F1FF0F3743C05023E3A368EC29C0B939F1FF3F3443C05023E3A368EC29C0B939F1FF3F3443C0F0EB21235FF729C09037F1FF0F3743C0F0EB21235FF729C0 0 \N \N \N t \N +64 1 6443 7597 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C05023E3A368EC29C09037F1FF0F3743C060A9D6E671E129C0B939F1FF3F3443C060A9D6E671E129C0B939F1FF3F3443C05023E3A368EC29C09037F1FF0F3743C05023E3A368EC29C0 0 \N \N \N t \N +65 1 6443 7598 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C060A9D6E671E129C09037F1FF0F3743C0D0D314EC7AD629C0B939F1FF3F3443C0D0D314EC7AD629C0B939F1FF3F3443C060A9D6E671E129C09037F1FF0F3743C060A9D6E671E129C0 0 \N \N \N t \N +66 1 6443 7599 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0D0D314EC7AD629C09037F1FF0F3743C008FBB5B383CB29C0B939F1FF3F3443C008FBB5B383CB29C0B939F1FF3F3443C0D0D314EC7AD629C09037F1FF0F3743C0D0D314EC7AD629C0 0 \N \N \N t \N +67 1 6443 7600 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C008FBB5B383CB29C09037F1FF0F3743C0507AD23D8CC029C0B939F1FF3F3443C0507AD23D8CC029C0B939F1FF3F3443C008FBB5B383CB29C09037F1FF0F3743C008FBB5B383CB29C0 0 \N \N \N t \N +68 1 6443 7601 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0507AD23D8CC029C09037F1FF0F3743C0A0AF828A94B529C0B939F1FF3F3443C0A0AF828A94B529C0B939F1FF3F3443C0507AD23D8CC029C09037F1FF0F3743C0507AD23D8CC029C0 0 \N \N \N t \N +69 1 6443 7602 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0A0AF828A94B529C09037F1FF0F3743C0C8FBDE999CAA29C0B939F1FF3F3443C0C8FBDE999CAA29C0B939F1FF3F3443C0A0AF828A94B529C09037F1FF0F3743C0A0AF828A94B529C0 0 \N \N \N t \N +70 1 6443 7603 14 
0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0C8FBDE999CAA29C09037F1FF0F3743C070C2FF6BA49F29C0B939F1FF3F3443C070C2FF6BA49F29C0B939F1FF3F3443C0C8FBDE999CAA29C09037F1FF0F3743C0C8FBDE999CAA29C0 0 \N \N \N t \N +71 1 6443 7604 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C070C2FF6BA49F29C09037F1FF0F3743C0D869FD00AC9429C0B939F1FF3F3443C0D869FD00AC9429C0B939F1FF3F3443C070C2FF6BA49F29C09037F1FF0F3743C070C2FF6BA49F29C0 0 \N \N \N t \N +72 1 6443 7605 14 0106000020E610000001000000010300000001000000050000009037F1FF0F3743C0D869FD00AC9429C09037F1FF0F3743C0205BF058B38929C0B939F1FF3F3443C0205BF058B38929C0B939F1FF3F3443C0D869FD00AC9429C09037F1FF0F3743C0D869FD00AC9429C0 0 \N \N \N t \N +73 1 6444 7594 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0E020D5674B0D2AC0B939F1FF3F3443C080B07A6455022AC0E43BF1FF6F3143C080B07A6455022AC0E43BF1FF6F3143C0E020D5674B0D2AC0B939F1FF3F3443C0E020D5674B0D2AC0 0 \N \N \N t \N +74 1 6444 7595 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C080B07A6455022AC0B939F1FF3F3443C0F0EB21235FF729C0E43BF1FF6F3143C0F0EB21235FF729C0E43BF1FF6F3143C080B07A6455022AC0B939F1FF3F3443C080B07A6455022AC0 0 \N \N \N t \N +75 1 6444 7596 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0F0EB21235FF729C0B939F1FF3F3443C05023E3A368EC29C0E43BF1FF6F3143C05023E3A368EC29C0E43BF1FF6F3143C0F0EB21235FF729C0B939F1FF3F3443C0F0EB21235FF729C0 0 \N \N \N t \N +76 1 6444 7597 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C05023E3A368EC29C0B939F1FF3F3443C060A9D6E671E129C0E43BF1FF6F3143C060A9D6E671E129C0E43BF1FF6F3143C05023E3A368EC29C0B939F1FF3F3443C05023E3A368EC29C0 0 \N \N \N t \N +77 1 6444 7598 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C060A9D6E671E129C0B939F1FF3F3443C0D0D314EC7AD629C0E43BF1FF6F3143C0D0D314EC7AD629C0E43BF1FF6F3143C060A9D6E671E129C0B939F1FF3F3443C060A9D6E671E129C0 0 \N \N \N t \N +78 1 6444 7599 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0D0D314EC7AD629C0B939F1FF3F3443C008FBB5B383CB29C0E43BF1FF6F3143C008FBB5B383CB29C0E43BF1FF6F3143C0D0D314EC7AD629C0B939F1FF3F3443C0D0D314EC7AD629C0 0 \N \N \N t \N +79 1 6444 7600 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C008FBB5B383CB29C0B939F1FF3F3443C0507AD23D8CC029C0E43BF1FF6F3143C0507AD23D8CC029C0E43BF1FF6F3143C008FBB5B383CB29C0B939F1FF3F3443C008FBB5B383CB29C0 0 \N \N \N t \N +80 1 6444 7601 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0507AD23D8CC029C0B939F1FF3F3443C0A0AF828A94B529C0E43BF1FF6F3143C0A0AF828A94B529C0E43BF1FF6F3143C0507AD23D8CC029C0B939F1FF3F3443C0507AD23D8CC029C0 0 \N \N \N t \N +81 1 6444 7602 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0A0AF828A94B529C0B939F1FF3F3443C0C8FBDE999CAA29C0E43BF1FF6F3143C0C8FBDE999CAA29C0E43BF1FF6F3143C0A0AF828A94B529C0B939F1FF3F3443C0A0AF828A94B529C0 0 \N \N \N t \N +82 1 6444 7603 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0C8FBDE999CAA29C0B939F1FF3F3443C070C2FF6BA49F29C0E43BF1FF6F3143C070C2FF6BA49F29C0E43BF1FF6F3143C0C8FBDE999CAA29C0B939F1FF3F3443C0C8FBDE999CAA29C0 0 \N \N \N t \N +84 1 6444 7605 14 0106000020E61000000100000001030000000100000005000000B939F1FF3F3443C0D869FD00AC9429C0B939F1FF3F3443C0205BF058B38929C0E43BF1FF6F3143C0205BF058B38929C0E43BF1FF6F3143C0D869FD00AC9429C0B939F1FF3F3443C0D869FD00AC9429C0 0 \N \N \N t \N +85 1 6445 7594 14 
0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0E020D5674B0D2AC0E43BF1FF6F3143C080B07A6455022AC00D3EF1FF9F2E43C080B07A6455022AC00D3EF1FF9F2E43C0E020D5674B0D2AC0E43BF1FF6F3143C0E020D5674B0D2AC0 0 \N \N \N t \N +86 1 6445 7595 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C080B07A6455022AC0E43BF1FF6F3143C0F0EB21235FF729C00D3EF1FF9F2E43C0F0EB21235FF729C00D3EF1FF9F2E43C080B07A6455022AC0E43BF1FF6F3143C080B07A6455022AC0 0 \N \N \N t \N +87 1 6445 7596 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0F0EB21235FF729C0E43BF1FF6F3143C05023E3A368EC29C00D3EF1FF9F2E43C05023E3A368EC29C00D3EF1FF9F2E43C0F0EB21235FF729C0E43BF1FF6F3143C0F0EB21235FF729C0 0 \N \N \N t \N +88 1 6445 7597 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C05023E3A368EC29C0E43BF1FF6F3143C060A9D6E671E129C00D3EF1FF9F2E43C060A9D6E671E129C00D3EF1FF9F2E43C05023E3A368EC29C0E43BF1FF6F3143C05023E3A368EC29C0 0 \N \N \N t \N +89 1 6445 7598 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C060A9D6E671E129C0E43BF1FF6F3143C0D0D314EC7AD629C00D3EF1FF9F2E43C0D0D314EC7AD629C00D3EF1FF9F2E43C060A9D6E671E129C0E43BF1FF6F3143C060A9D6E671E129C0 0 \N \N \N t \N +90 1 6445 7599 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0D0D314EC7AD629C0E43BF1FF6F3143C008FBB5B383CB29C00D3EF1FF9F2E43C008FBB5B383CB29C00D3EF1FF9F2E43C0D0D314EC7AD629C0E43BF1FF6F3143C0D0D314EC7AD629C0 0 \N \N \N t \N +91 1 6445 7600 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C008FBB5B383CB29C0E43BF1FF6F3143C0507AD23D8CC029C00D3EF1FF9F2E43C0507AD23D8CC029C00D3EF1FF9F2E43C008FBB5B383CB29C0E43BF1FF6F3143C008FBB5B383CB29C0 0 \N \N \N t \N +92 1 6445 7601 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0507AD23D8CC029C0E43BF1FF6F3143C0A0AF828A94B529C00D3EF1FF9F2E43C0A0AF828A94B529C00D3EF1FF9F2E43C0507AD23D8CC029C0E43BF1FF6F3143C0507AD23D8CC029C0 0 \N \N \N t \N +93 1 6445 7602 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0A0AF828A94B529C0E43BF1FF6F3143C0C8FBDE999CAA29C00D3EF1FF9F2E43C0C8FBDE999CAA29C00D3EF1FF9F2E43C0A0AF828A94B529C0E43BF1FF6F3143C0A0AF828A94B529C0 0 \N \N \N t \N +94 1 6445 7603 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0C8FBDE999CAA29C0E43BF1FF6F3143C070C2FF6BA49F29C00D3EF1FF9F2E43C070C2FF6BA49F29C00D3EF1FF9F2E43C0C8FBDE999CAA29C0E43BF1FF6F3143C0C8FBDE999CAA29C0 0 \N \N \N t \N +95 1 6445 7604 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C070C2FF6BA49F29C0E43BF1FF6F3143C0D869FD00AC9429C00D3EF1FF9F2E43C0D869FD00AC9429C00D3EF1FF9F2E43C070C2FF6BA49F29C0E43BF1FF6F3143C070C2FF6BA49F29C0 0 \N \N \N t \N +96 1 6445 7605 14 0106000020E61000000100000001030000000100000005000000E43BF1FF6F3143C0D869FD00AC9429C0E43BF1FF6F3143C0205BF058B38929C00D3EF1FF9F2E43C0205BF058B38929C00D3EF1FF9F2E43C0D869FD00AC9429C0E43BF1FF6F3143C0D869FD00AC9429C0 0 \N \N \N t \N +97 1 6446 7594 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0E020D5674B0D2AC00D3EF1FF9F2E43C080B07A6455022AC03940F1FFCF2B43C080B07A6455022AC03940F1FFCF2B43C0E020D5674B0D2AC00D3EF1FF9F2E43C0E020D5674B0D2AC0 0 \N \N \N t \N +98 1 6446 7595 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C080B07A6455022AC00D3EF1FF9F2E43C0F0EB21235FF729C03940F1FFCF2B43C0F0EB21235FF729C03940F1FFCF2B43C080B07A6455022AC00D3EF1FF9F2E43C080B07A6455022AC0 0 \N \N \N t \N +99 1 6446 7596 14 
0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0F0EB21235FF729C00D3EF1FF9F2E43C05023E3A368EC29C03940F1FFCF2B43C05023E3A368EC29C03940F1FFCF2B43C0F0EB21235FF729C00D3EF1FF9F2E43C0F0EB21235FF729C0 0 \N \N \N t \N +100 1 6446 7597 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C05023E3A368EC29C00D3EF1FF9F2E43C060A9D6E671E129C03940F1FFCF2B43C060A9D6E671E129C03940F1FFCF2B43C05023E3A368EC29C00D3EF1FF9F2E43C05023E3A368EC29C0 0 \N \N \N t \N +101 1 6446 7598 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C060A9D6E671E129C00D3EF1FF9F2E43C0D0D314EC7AD629C03940F1FFCF2B43C0D0D314EC7AD629C03940F1FFCF2B43C060A9D6E671E129C00D3EF1FF9F2E43C060A9D6E671E129C0 0 \N \N \N t \N +102 1 6446 7599 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0D0D314EC7AD629C00D3EF1FF9F2E43C008FBB5B383CB29C03940F1FFCF2B43C008FBB5B383CB29C03940F1FFCF2B43C0D0D314EC7AD629C00D3EF1FF9F2E43C0D0D314EC7AD629C0 0 \N \N \N t \N +103 1 6446 7600 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C008FBB5B383CB29C00D3EF1FF9F2E43C0507AD23D8CC029C03940F1FFCF2B43C0507AD23D8CC029C03940F1FFCF2B43C008FBB5B383CB29C00D3EF1FF9F2E43C008FBB5B383CB29C0 0 \N \N \N t \N +104 1 6446 7601 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0507AD23D8CC029C00D3EF1FF9F2E43C0A0AF828A94B529C03940F1FFCF2B43C0A0AF828A94B529C03940F1FFCF2B43C0507AD23D8CC029C00D3EF1FF9F2E43C0507AD23D8CC029C0 0 \N \N \N t \N +105 1 6446 7602 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0A0AF828A94B529C00D3EF1FF9F2E43C0C8FBDE999CAA29C03940F1FFCF2B43C0C8FBDE999CAA29C03940F1FFCF2B43C0A0AF828A94B529C00D3EF1FF9F2E43C0A0AF828A94B529C0 0 \N \N \N t \N +106 1 6446 7603 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0C8FBDE999CAA29C00D3EF1FF9F2E43C070C2FF6BA49F29C03940F1FFCF2B43C070C2FF6BA49F29C03940F1FFCF2B43C0C8FBDE999CAA29C00D3EF1FF9F2E43C0C8FBDE999CAA29C0 0 \N \N \N t \N +107 1 6446 7604 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C070C2FF6BA49F29C00D3EF1FF9F2E43C0D869FD00AC9429C03940F1FFCF2B43C0D869FD00AC9429C03940F1FFCF2B43C070C2FF6BA49F29C00D3EF1FF9F2E43C070C2FF6BA49F29C0 0 \N \N \N t \N +108 1 6446 7605 14 0106000020E610000001000000010300000001000000050000000D3EF1FF9F2E43C0D869FD00AC9429C00D3EF1FF9F2E43C0205BF058B38929C03940F1FFCF2B43C0205BF058B38929C03940F1FFCF2B43C0D869FD00AC9429C00D3EF1FF9F2E43C0D869FD00AC9429C0 0 \N \N \N t \N +109 1 6447 7594 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0E020D5674B0D2AC03940F1FFCF2B43C080B07A6455022AC06242F1FFFF2843C080B07A6455022AC06242F1FFFF2843C0E020D5674B0D2AC03940F1FFCF2B43C0E020D5674B0D2AC0 0 \N \N \N t \N +110 1 6447 7595 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C080B07A6455022AC03940F1FFCF2B43C0F0EB21235FF729C06242F1FFFF2843C0F0EB21235FF729C06242F1FFFF2843C080B07A6455022AC03940F1FFCF2B43C080B07A6455022AC0 0 \N \N \N t \N +111 1 6447 7596 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0F0EB21235FF729C03940F1FFCF2B43C05023E3A368EC29C06242F1FFFF2843C05023E3A368EC29C06242F1FFFF2843C0F0EB21235FF729C03940F1FFCF2B43C0F0EB21235FF729C0 0 \N \N \N t \N +112 1 6447 7597 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C05023E3A368EC29C03940F1FFCF2B43C060A9D6E671E129C06242F1FFFF2843C060A9D6E671E129C06242F1FFFF2843C05023E3A368EC29C03940F1FFCF2B43C05023E3A368EC29C0 0 \N \N \N t \N +113 1 6447 7598 14 
0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C060A9D6E671E129C03940F1FFCF2B43C0D0D314EC7AD629C06242F1FFFF2843C0D0D314EC7AD629C06242F1FFFF2843C060A9D6E671E129C03940F1FFCF2B43C060A9D6E671E129C0 0 \N \N \N t \N +114 1 6447 7599 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0D0D314EC7AD629C03940F1FFCF2B43C008FBB5B383CB29C06242F1FFFF2843C008FBB5B383CB29C06242F1FFFF2843C0D0D314EC7AD629C03940F1FFCF2B43C0D0D314EC7AD629C0 0 \N \N \N t \N +115 1 6447 7600 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C008FBB5B383CB29C03940F1FFCF2B43C0507AD23D8CC029C06242F1FFFF2843C0507AD23D8CC029C06242F1FFFF2843C008FBB5B383CB29C03940F1FFCF2B43C008FBB5B383CB29C0 0 \N \N \N t \N +116 1 6447 7601 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0507AD23D8CC029C03940F1FFCF2B43C0A0AF828A94B529C06242F1FFFF2843C0A0AF828A94B529C06242F1FFFF2843C0507AD23D8CC029C03940F1FFCF2B43C0507AD23D8CC029C0 0 \N \N \N t \N +117 1 6447 7602 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0A0AF828A94B529C03940F1FFCF2B43C0C8FBDE999CAA29C06242F1FFFF2843C0C8FBDE999CAA29C06242F1FFFF2843C0A0AF828A94B529C03940F1FFCF2B43C0A0AF828A94B529C0 0 \N \N \N t \N +118 1 6447 7603 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0C8FBDE999CAA29C03940F1FFCF2B43C070C2FF6BA49F29C06242F1FFFF2843C070C2FF6BA49F29C06242F1FFFF2843C0C8FBDE999CAA29C03940F1FFCF2B43C0C8FBDE999CAA29C0 0 \N \N \N t \N +119 1 6447 7604 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C070C2FF6BA49F29C03940F1FFCF2B43C0D869FD00AC9429C06242F1FFFF2843C0D869FD00AC9429C06242F1FFFF2843C070C2FF6BA49F29C03940F1FFCF2B43C070C2FF6BA49F29C0 0 \N \N \N t \N +120 1 6447 7605 14 0106000020E610000001000000010300000001000000050000003940F1FFCF2B43C0D869FD00AC9429C03940F1FFCF2B43C0205BF058B38929C06242F1FFFF2843C0205BF058B38929C06242F1FFFF2843C0D869FD00AC9429C03940F1FFCF2B43C0D869FD00AC9429C0 0 \N \N \N t \N +121 1 6448 7594 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0E020D5674B0D2AC06242F1FFFF2843C080B07A6455022AC08C44F1FF2F2643C080B07A6455022AC08C44F1FF2F2643C0E020D5674B0D2AC06242F1FFFF2843C0E020D5674B0D2AC0 0 \N \N \N t \N +122 1 6448 7595 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C080B07A6455022AC06242F1FFFF2843C0F0EB21235FF729C08C44F1FF2F2643C0F0EB21235FF729C08C44F1FF2F2643C080B07A6455022AC06242F1FFFF2843C080B07A6455022AC0 0 \N \N \N t \N +123 1 6448 7596 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0F0EB21235FF729C06242F1FFFF2843C05023E3A368EC29C08C44F1FF2F2643C05023E3A368EC29C08C44F1FF2F2643C0F0EB21235FF729C06242F1FFFF2843C0F0EB21235FF729C0 0 \N \N \N t \N +124 1 6448 7597 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C05023E3A368EC29C06242F1FFFF2843C060A9D6E671E129C08C44F1FF2F2643C060A9D6E671E129C08C44F1FF2F2643C05023E3A368EC29C06242F1FFFF2843C05023E3A368EC29C0 0 \N \N \N t \N +125 1 6448 7598 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C060A9D6E671E129C06242F1FFFF2843C0D0D314EC7AD629C08C44F1FF2F2643C0D0D314EC7AD629C08C44F1FF2F2643C060A9D6E671E129C06242F1FFFF2843C060A9D6E671E129C0 0 \N \N \N t \N +126 1 6448 7599 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0D0D314EC7AD629C06242F1FFFF2843C008FBB5B383CB29C08C44F1FF2F2643C008FBB5B383CB29C08C44F1FF2F2643C0D0D314EC7AD629C06242F1FFFF2843C0D0D314EC7AD629C0 0 \N \N \N t \N +127 1 6448 7600 14 
0106000020E610000001000000010300000001000000050000006242F1FFFF2843C008FBB5B383CB29C06242F1FFFF2843C0507AD23D8CC029C08C44F1FF2F2643C0507AD23D8CC029C08C44F1FF2F2643C008FBB5B383CB29C06242F1FFFF2843C008FBB5B383CB29C0 0 \N \N \N t \N +128 1 6448 7601 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0507AD23D8CC029C06242F1FFFF2843C0A0AF828A94B529C08C44F1FF2F2643C0A0AF828A94B529C08C44F1FF2F2643C0507AD23D8CC029C06242F1FFFF2843C0507AD23D8CC029C0 0 \N \N \N t \N +130 1 6448 7603 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0C8FBDE999CAA29C06242F1FFFF2843C070C2FF6BA49F29C08C44F1FF2F2643C070C2FF6BA49F29C08C44F1FF2F2643C0C8FBDE999CAA29C06242F1FFFF2843C0C8FBDE999CAA29C0 0 \N \N \N t \N +131 1 6448 7604 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C070C2FF6BA49F29C06242F1FFFF2843C0D869FD00AC9429C08C44F1FF2F2643C0D869FD00AC9429C08C44F1FF2F2643C070C2FF6BA49F29C06242F1FFFF2843C070C2FF6BA49F29C0 0 \N \N \N t \N +132 1 6448 7605 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0D869FD00AC9429C06242F1FFFF2843C0205BF058B38929C08C44F1FF2F2643C0205BF058B38929C08C44F1FF2F2643C0D869FD00AC9429C06242F1FFFF2843C0D869FD00AC9429C0 0 \N \N \N t \N +133 1 6449 7594 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0E020D5674B0D2AC08C44F1FF2F2643C080B07A6455022AC0B546F1FF5F2343C080B07A6455022AC0B546F1FF5F2343C0E020D5674B0D2AC08C44F1FF2F2643C0E020D5674B0D2AC0 0 \N \N \N t \N +134 1 6449 7595 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C080B07A6455022AC08C44F1FF2F2643C0F0EB21235FF729C0B546F1FF5F2343C0F0EB21235FF729C0B546F1FF5F2343C080B07A6455022AC08C44F1FF2F2643C080B07A6455022AC0 0 \N \N \N t \N +135 1 6449 7596 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0F0EB21235FF729C08C44F1FF2F2643C05023E3A368EC29C0B546F1FF5F2343C05023E3A368EC29C0B546F1FF5F2343C0F0EB21235FF729C08C44F1FF2F2643C0F0EB21235FF729C0 0 \N \N \N t \N +136 1 6449 7597 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C05023E3A368EC29C08C44F1FF2F2643C060A9D6E671E129C0B546F1FF5F2343C060A9D6E671E129C0B546F1FF5F2343C05023E3A368EC29C08C44F1FF2F2643C05023E3A368EC29C0 0 \N \N \N t \N +137 1 6449 7598 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C060A9D6E671E129C08C44F1FF2F2643C0D0D314EC7AD629C0B546F1FF5F2343C0D0D314EC7AD629C0B546F1FF5F2343C060A9D6E671E129C08C44F1FF2F2643C060A9D6E671E129C0 0 \N \N \N t \N +138 1 6449 7599 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0D0D314EC7AD629C08C44F1FF2F2643C008FBB5B383CB29C0B546F1FF5F2343C008FBB5B383CB29C0B546F1FF5F2343C0D0D314EC7AD629C08C44F1FF2F2643C0D0D314EC7AD629C0 0 \N \N \N t \N +139 1 6449 7600 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C008FBB5B383CB29C08C44F1FF2F2643C0507AD23D8CC029C0B546F1FF5F2343C0507AD23D8CC029C0B546F1FF5F2343C008FBB5B383CB29C08C44F1FF2F2643C008FBB5B383CB29C0 0 \N \N \N t \N +140 1 6449 7601 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0507AD23D8CC029C08C44F1FF2F2643C0A0AF828A94B529C0B546F1FF5F2343C0A0AF828A94B529C0B546F1FF5F2343C0507AD23D8CC029C08C44F1FF2F2643C0507AD23D8CC029C0 0 \N \N \N t \N +141 1 6449 7602 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0A0AF828A94B529C08C44F1FF2F2643C0C8FBDE999CAA29C0B546F1FF5F2343C0C8FBDE999CAA29C0B546F1FF5F2343C0A0AF828A94B529C08C44F1FF2F2643C0A0AF828A94B529C0 0 \N \N \N t \N +142 1 6449 7603 14 
0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0C8FBDE999CAA29C08C44F1FF2F2643C070C2FF6BA49F29C0B546F1FF5F2343C070C2FF6BA49F29C0B546F1FF5F2343C0C8FBDE999CAA29C08C44F1FF2F2643C0C8FBDE999CAA29C0 0 \N \N \N t \N +143 1 6449 7604 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C070C2FF6BA49F29C08C44F1FF2F2643C0D869FD00AC9429C0B546F1FF5F2343C0D869FD00AC9429C0B546F1FF5F2343C070C2FF6BA49F29C08C44F1FF2F2643C070C2FF6BA49F29C0 0 \N \N \N t \N +144 1 6449 7605 14 0106000020E610000001000000010300000001000000050000008C44F1FF2F2643C0D869FD00AC9429C08C44F1FF2F2643C0205BF058B38929C0B546F1FF5F2343C0205BF058B38929C0B546F1FF5F2343C0D869FD00AC9429C08C44F1FF2F2643C0D869FD00AC9429C0 0 \N \N \N t \N +145 1 6450 7594 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0E020D5674B0D2AC0B546F1FF5F2343C080B07A6455022AC0DE48F1FF8F2043C080B07A6455022AC0DE48F1FF8F2043C0E020D5674B0D2AC0B546F1FF5F2343C0E020D5674B0D2AC0 0 \N \N \N t \N +146 1 6450 7595 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C080B07A6455022AC0B546F1FF5F2343C0F0EB21235FF729C0DE48F1FF8F2043C0F0EB21235FF729C0DE48F1FF8F2043C080B07A6455022AC0B546F1FF5F2343C080B07A6455022AC0 0 \N \N \N t \N +147 1 6450 7596 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0F0EB21235FF729C0B546F1FF5F2343C05023E3A368EC29C0DE48F1FF8F2043C05023E3A368EC29C0DE48F1FF8F2043C0F0EB21235FF729C0B546F1FF5F2343C0F0EB21235FF729C0 0 \N \N \N t \N +148 1 6450 7597 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C05023E3A368EC29C0B546F1FF5F2343C060A9D6E671E129C0DE48F1FF8F2043C060A9D6E671E129C0DE48F1FF8F2043C05023E3A368EC29C0B546F1FF5F2343C05023E3A368EC29C0 0 \N \N \N t \N +149 1 6450 7598 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C060A9D6E671E129C0B546F1FF5F2343C0D0D314EC7AD629C0DE48F1FF8F2043C0D0D314EC7AD629C0DE48F1FF8F2043C060A9D6E671E129C0B546F1FF5F2343C060A9D6E671E129C0 0 \N \N \N t \N +150 1 6450 7599 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0D0D314EC7AD629C0B546F1FF5F2343C008FBB5B383CB29C0DE48F1FF8F2043C008FBB5B383CB29C0DE48F1FF8F2043C0D0D314EC7AD629C0B546F1FF5F2343C0D0D314EC7AD629C0 0 \N \N \N t \N +151 1 6450 7600 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C008FBB5B383CB29C0B546F1FF5F2343C0507AD23D8CC029C0DE48F1FF8F2043C0507AD23D8CC029C0DE48F1FF8F2043C008FBB5B383CB29C0B546F1FF5F2343C008FBB5B383CB29C0 0 \N \N \N t \N +152 1 6450 7601 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0507AD23D8CC029C0B546F1FF5F2343C0A0AF828A94B529C0DE48F1FF8F2043C0A0AF828A94B529C0DE48F1FF8F2043C0507AD23D8CC029C0B546F1FF5F2343C0507AD23D8CC029C0 0 \N \N \N t \N +153 1 6450 7602 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0A0AF828A94B529C0B546F1FF5F2343C0C8FBDE999CAA29C0DE48F1FF8F2043C0C8FBDE999CAA29C0DE48F1FF8F2043C0A0AF828A94B529C0B546F1FF5F2343C0A0AF828A94B529C0 0 \N \N \N t \N +154 1 6450 7603 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0C8FBDE999CAA29C0B546F1FF5F2343C070C2FF6BA49F29C0DE48F1FF8F2043C070C2FF6BA49F29C0DE48F1FF8F2043C0C8FBDE999CAA29C0B546F1FF5F2343C0C8FBDE999CAA29C0 0 \N \N \N t \N +155 1 6450 7604 14 0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C070C2FF6BA49F29C0B546F1FF5F2343C0D869FD00AC9429C0DE48F1FF8F2043C0D869FD00AC9429C0DE48F1FF8F2043C070C2FF6BA49F29C0B546F1FF5F2343C070C2FF6BA49F29C0 0 \N \N \N t \N +156 1 6450 7605 14 
0106000020E61000000100000001030000000100000005000000B546F1FF5F2343C0D869FD00AC9429C0B546F1FF5F2343C0205BF058B38929C0DE48F1FF8F2043C0205BF058B38929C0DE48F1FF8F2043C0D869FD00AC9429C0B546F1FF5F2343C0D869FD00AC9429C0 0 \N \N \N t \N +157 1 6451 7594 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0E020D5674B0D2AC0DE48F1FF8F2043C080B07A6455022AC00A4BF1FFBF1D43C080B07A6455022AC00A4BF1FFBF1D43C0E020D5674B0D2AC0DE48F1FF8F2043C0E020D5674B0D2AC0 0 \N \N \N t \N +158 1 6451 7595 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C080B07A6455022AC0DE48F1FF8F2043C0F0EB21235FF729C00A4BF1FFBF1D43C0F0EB21235FF729C00A4BF1FFBF1D43C080B07A6455022AC0DE48F1FF8F2043C080B07A6455022AC0 0 \N \N \N t \N +159 1 6451 7596 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0F0EB21235FF729C0DE48F1FF8F2043C05023E3A368EC29C00A4BF1FFBF1D43C05023E3A368EC29C00A4BF1FFBF1D43C0F0EB21235FF729C0DE48F1FF8F2043C0F0EB21235FF729C0 0 \N \N \N t \N +160 1 6451 7597 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C05023E3A368EC29C0DE48F1FF8F2043C060A9D6E671E129C00A4BF1FFBF1D43C060A9D6E671E129C00A4BF1FFBF1D43C05023E3A368EC29C0DE48F1FF8F2043C05023E3A368EC29C0 0 \N \N \N t \N +161 1 6451 7598 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C060A9D6E671E129C0DE48F1FF8F2043C0D0D314EC7AD629C00A4BF1FFBF1D43C0D0D314EC7AD629C00A4BF1FFBF1D43C060A9D6E671E129C0DE48F1FF8F2043C060A9D6E671E129C0 0 \N \N \N t \N +162 1 6451 7599 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0D0D314EC7AD629C0DE48F1FF8F2043C008FBB5B383CB29C00A4BF1FFBF1D43C008FBB5B383CB29C00A4BF1FFBF1D43C0D0D314EC7AD629C0DE48F1FF8F2043C0D0D314EC7AD629C0 0 \N \N \N t \N +163 1 6451 7600 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C008FBB5B383CB29C0DE48F1FF8F2043C0507AD23D8CC029C00A4BF1FFBF1D43C0507AD23D8CC029C00A4BF1FFBF1D43C008FBB5B383CB29C0DE48F1FF8F2043C008FBB5B383CB29C0 0 \N \N \N t \N +164 1 6451 7601 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0507AD23D8CC029C0DE48F1FF8F2043C0A0AF828A94B529C00A4BF1FFBF1D43C0A0AF828A94B529C00A4BF1FFBF1D43C0507AD23D8CC029C0DE48F1FF8F2043C0507AD23D8CC029C0 0 \N \N \N t \N +165 1 6451 7602 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0A0AF828A94B529C0DE48F1FF8F2043C0C8FBDE999CAA29C00A4BF1FFBF1D43C0C8FBDE999CAA29C00A4BF1FFBF1D43C0A0AF828A94B529C0DE48F1FF8F2043C0A0AF828A94B529C0 0 \N \N \N t \N +166 1 6451 7603 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0C8FBDE999CAA29C0DE48F1FF8F2043C070C2FF6BA49F29C00A4BF1FFBF1D43C070C2FF6BA49F29C00A4BF1FFBF1D43C0C8FBDE999CAA29C0DE48F1FF8F2043C0C8FBDE999CAA29C0 0 \N \N \N t \N +167 1 6451 7604 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C070C2FF6BA49F29C0DE48F1FF8F2043C0D869FD00AC9429C00A4BF1FFBF1D43C0D869FD00AC9429C00A4BF1FFBF1D43C070C2FF6BA49F29C0DE48F1FF8F2043C070C2FF6BA49F29C0 0 \N \N \N t \N +168 1 6451 7605 14 0106000020E61000000100000001030000000100000005000000DE48F1FF8F2043C0D869FD00AC9429C0DE48F1FF8F2043C0205BF058B38929C00A4BF1FFBF1D43C0205BF058B38929C00A4BF1FFBF1D43C0D869FD00AC9429C0DE48F1FF8F2043C0D869FD00AC9429C0 0 \N \N \N t \N +171 1 12889 15208 15 0106000020E61000000100000001030000000100000005000000C83AF1FFD73243C08EC2FF6BA49F29C0E13BF1FF6F3143C08EC2FF6BA49F29C0E13BF1FF6F3143C09373213E289A29C0C83AF1FFD73243C09373213E289A29C0C83AF1FFD73243C08EC2FF6BA49F29C0 0 \N \N \N t \N +12 2 \N \N \N 
0106000020E61000000100000001030000000100000005000000E49ECD52E7E532406330BC5C89F640C082133037B2E43240E83320AF4AF740C093BB4F6D38E13240E83320AF4AF740C093BB4F6D38E132406330BC5C89F640C0E49ECD52E7E532406330BC5C89F640C0 0 \N \N \N f \N +13 2 \N \N \N 0106000020E6100000010000000103000000010000000500000093BB4F6D38E13240B1629661D4F540C0F7FF7FADC9E63240EFCFC9CBFBF540C0E49ECD52E7E532406330BC5C89F640C093BB4F6D38E132406330BC5C89F640C093BB4F6D38E13240B1629661D4F540C0 0 \N \N \N f \N +172 1 12889 15209 15 0106000020E61000000100000001030000000100000005000000C83AF1FFD73243C09373213E289A29C0E13BF1FF6F3143C09373213E289A29C0E13BF1FF6F3143C0E169FD00AC9429C0C83AF1FFD73243C0E169FD00AC9429C0C83AF1FFD73243C09373213E289A29C0 6 \N 360183 \N t \N +14 2 \N \N \N 0106000020E610000001000000010300000001000000050000005C52664B8DD9324070E0C44E52F940C0F6FF3FB6F2D632400BF53EC585F940C07DF31D1B6CD6324055235A8946F840C05C52664B8DD9324055235A8946F840C05C52664B8DD9324070E0C44E52F940C0 0 \N \N \N f \N +15 2 \N \N \N 0106000020E610000001000000010300000001000000050000007DF31D1B6CD6324055235A8946F840C0D8175CE901D63240E83320AF4AF740C05C52664B8DD93240E83320AF4AF740C05C52664B8DD9324055235A8946F840C07DF31D1B6CD6324055235A8946F840C0 0 \N \N \N f \N +169 1 12888 15208 15 0106000020E61000000100000001030000000100000005000000BC39F1FF3F3443C08EC2FF6BA49F29C0C83AF1FFD73243C08EC2FF6BA49F29C0C83AF1FFD73243C09373213E289A29C0BC39F1FF3F3443C09373213E289A29C0BC39F1FF3F3443C08EC2FF6BA49F29C0 0 \N \N \N t \N +17 2 \N \N \N 0106000020E610000001000000010300000001000000050000005C52664B8DD9324055235A8946F840C05C52664B8DD93240E83320AF4AF740C0385FA2C5E5DC3240E83320AF4AF740C0385FA2C5E5DC324055235A8946F840C05C52664B8DD9324055235A8946F840C0 0 \N \N \N f \N +129 1 6448 7602 14 0106000020E610000001000000010300000001000000050000006242F1FFFF2843C0A0AF828A94B529C06242F1FFFF2843C0C8FBDE999CAA29C08C44F1FF2F2643C0C8FBDE999CAA29C08C44F1FF2F2643C0A0AF828A94B529C06242F1FFFF2843C0A0AF828A94B529C0 6 \N 360183 \N t \N +18 2 \N \N \N 0106000020E610000001000000010300000001000000050000001C0821F41BE03240112867B5D0F840C0385FA2C5E5DC3240FBB72B2F10F940C0385FA2C5E5DC3240033CFBBF06F840C01C0821F41BE03240033CFBBF06F840C01C0821F41BE03240112867B5D0F840C0 0 \N \N \N f \N +170 1 12888 15209 15 0106000020E61000000100000001030000000100000005000000BC39F1FF3F3443C09373213E289A29C0C83AF1FFD73243C09373213E289A29C0C83AF1FFD73243C0E169FD00AC9429C0BC39F1FF3F3443C0E169FD00AC9429C0BC39F1FF3F3443C09373213E289A29C0 4 \N 360183 360183 t \N +1 3 \N \N \N 0106000020E610000001000000010300000001000000050000000100405B40D53240CEE033A57FF540C0000040B6F2D632400FF53EC585F940C00000C02390E23240EEEACD35A0F840C0010080ADC9E63240EBCFC9CBFBF540C00100405B40D53240CEE033A57FF540C0 0 \N \N \N f {} +19 2 \N \N \N 0106000020E61000000100000001030000000100000005000000385FA2C5E5DC3240033CFBBF06F840C0385FA2C5E5DC3240E83320AF4AF740C01C0821F41BE03240E83320AF4AF740C01C0821F41BE03240033CFBBF06F840C0385FA2C5E5DC3240033CFBBF06F840C0 0 \N \N \N f \N +20 2 \N \N \N 0106000020E6100000010000000103000000010000000500000077C0FC8285E33240033CFBBF06F840C00E00C02390E23240E8EACD35A0F840C01C0821F41BE03240112867B5D0F840C01C0821F41BE03240033CFBBF06F840C077C0FC8285E33240033CFBBF06F840C0 0 \N \N \N f \N +6 2 \N \N \N 0106000020E61000000100000001030000000100000005000000D8175CE901D63240E83320AF4AF740C01B6EF23DA5D532402DE641E86EF640C081542F2F30D932402DE641E86EF640C081542F2F30D93240E83320AF4AF740C0D8175CE901D63240E83320AF4AF740C0 0 \N \N \N f \N +7 2 \N \N \N 
0106000020E610000001000000010300000001000000050000001B6EF23DA5D532402DE641E86EF640C0F6FF3F5B40D53240CFE033A57FF540C081542F2F30D9324029C537849BF540C081542F2F30D932402DE641E86EF640C01B6EF23DA5D532402DE641E86EF640C0 0 \N \N \N f \N +8 2 \N \N \N 0106000020E61000000100000001030000000100000005000000385FA2C5E5DC32402DE641E86EF640C0385FA2C5E5DC3240E83320AF4AF740C081542F2F30D93240E83320AF4AF740C081542F2F30D932402DE641E86EF640C0385FA2C5E5DC32402DE641E86EF640C0 0 \N \N \N f \N +10 2 \N \N \N 0106000020E6100000010000000103000000010000000500000093BB4F6D38E132406330BC5C89F640C093BB4F6D38E13240E83320AF4AF740C0385FA2C5E5DC3240E83320AF4AF740C0385FA2C5E5DC32406330BC5C89F640C093BB4F6D38E132406330BC5C89F640C0 0 \N \N \N f \N +11 2 \N \N \N 0106000020E61000000100000001030000000100000005000000385FA2C5E5DC32404883EAC6B5F540C093BB4F6D38E13240B1629661D4F540C093BB4F6D38E132406330BC5C89F640C0385FA2C5E5DC32406330BC5C89F640C0385FA2C5E5DC32404883EAC6B5F540C0 0 \N \N \N f \N +21 2 \N \N \N 0106000020E6100000010000000103000000010000000500000082133037B2E43240E83320AF4AF740C077C0FC8285E33240033CFBBF06F840C01C0821F41BE03240033CFBBF06F840C01C0821F41BE03240E83320AF4AF740C082133037B2E43240E83320AF4AF740C0 0 \N \N \N f \N +16 2 \N \N \N 0106000020E61000000100000001030000000100000005000000385FA2C5E5DC3240FBB72B2F10F940C05C52664B8DD9324070E0C44E52F940C05C52664B8DD9324055235A8946F840C0385FA2C5E5DC324055235A8946F840C0385FA2C5E5DC3240FBB72B2F10F940C0 6 \N 2823295 \N f \N +9 2 \N \N \N 0106000020E6100000010000000103000000010000000500000081542F2F30D9324029C537849BF540C0385FA2C5E5DC32404883EAC6B5F540C0385FA2C5E5DC32402DE641E86EF640C081542F2F30D932402DE641E86EF640C081542F2F30D9324029C537849BF540C0 2 \N 360183 \N f \N +\. + + +-- +-- Data for Name: users; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.users (id, username, role, mapping_level, tasks_mapped, tasks_validated, tasks_invalidated, projects_mapped, email_address, facebook_id, is_email_verified, linkedin_id, twitter_id, date_registered, last_validation_date, validation_message, is_expert) FROM stdin; +2823295 ramyaragupathy 0 3 0 0 0 {2} \N \N f \N \N 2019-04-09 03:11:54.503164 2019-04-09 03:11:54.503182 t f +360183 wille 1 3 2 1 0 {1,2} \N \N f \N \N 2019-04-08 10:51:26.758678 2019-04-08 11:40:37.239041 t f +94253 xamanu 1 3 0 0 0 \N \N \N f \N \N 2019-04-19 13:44:25.422959 2019-04-19 13:44:25.422967 t f +\. + + +-- +-- Data for Name: users_licenses; Type: TABLE DATA; Schema: public; Owner: - +-- + +COPY public.users_licenses ("user", license) FROM stdin; +\. + + +-- +-- Data for Name: topology; Type: TABLE DATA; Schema: topology; Owner: - +-- + +COPY topology.topology (id, name, srid, "precision", hasz) FROM stdin; +\. + + +-- +-- Data for Name: layer; Type: TABLE DATA; Schema: topology; Owner: - +-- + +COPY topology.layer (topology_id, layer_id, schema_name, table_name, feature_column, feature_type, level, child_id) FROM stdin; +\. 
+ + +-- +-- Name: licenses_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.licenses_id_seq', 1, false); + + +-- +-- Name: messages_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.messages_id_seq', 2, true); + + +-- +-- Name: priority_areas_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.priority_areas_id_seq', 1, false); + + +-- +-- Name: project_chat_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.project_chat_id_seq', 1, false); + + +-- +-- Name: projects_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.projects_id_seq', 3, true); + + +-- +-- Name: tags_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.tags_id_seq', 1, false); + + +-- +-- Name: task_history_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.task_history_id_seq', 123, true); + + +-- +-- Name: task_invalidation_history_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.task_invalidation_history_id_seq', 1, false); + + +-- +-- Name: users_id_seq; Type: SEQUENCE SET; Schema: public; Owner: - +-- + +SELECT pg_catalog.setval('public.users_id_seq', 1, false); + + +-- +-- Name: alembic_version alembic_version_pkc; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.alembic_version + ADD CONSTRAINT alembic_version_pkc PRIMARY KEY (version_num); + + +-- +-- Name: licenses licenses_name_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.licenses + ADD CONSTRAINT licenses_name_key UNIQUE (name); + + +-- +-- Name: licenses licenses_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.licenses + ADD CONSTRAINT licenses_pkey PRIMARY KEY (id); + + +-- +-- Name: messages messages_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.messages + ADD CONSTRAINT messages_pkey PRIMARY KEY (id); + + +-- +-- Name: priority_areas priority_areas_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.priority_areas + ADD CONSTRAINT priority_areas_pkey PRIMARY KEY (id); + + +-- +-- Name: project_chat project_chat_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_chat + ADD CONSTRAINT project_chat_pkey PRIMARY KEY (id); + + +-- +-- Name: project_info project_info_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_info + ADD CONSTRAINT project_info_pkey PRIMARY KEY (project_id, locale); + + +-- +-- Name: projects projects_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.projects + ADD CONSTRAINT projects_pkey PRIMARY KEY (id); + + +-- +-- Name: tags tags_campaigns_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tags + ADD CONSTRAINT tags_campaigns_key UNIQUE (campaigns); + + +-- +-- Name: tags tags_organisations_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tags + ADD CONSTRAINT tags_organisations_key UNIQUE (organisations); + + +-- +-- Name: tags tags_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tags + ADD CONSTRAINT tags_pkey PRIMARY KEY (id); + + +-- +-- Name: task_history task_history_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_history 
+ ADD CONSTRAINT task_history_pkey PRIMARY KEY (id); + + +-- +-- Name: task_invalidation_history task_invalidation_history_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT task_invalidation_history_pkey PRIMARY KEY (id); + + +-- +-- Name: tasks tasks_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tasks + ADD CONSTRAINT tasks_pkey PRIMARY KEY (id, project_id); + + +-- +-- Name: users users_pkey; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.users + ADD CONSTRAINT users_pkey PRIMARY KEY (id); + + +-- +-- Name: users users_username_key; Type: CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.users + ADD CONSTRAINT users_username_key UNIQUE (username); + + +-- +-- Name: idx_geometry; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX idx_geometry ON public.projects USING gist (geometry); + + +-- +-- Name: idx_project_info composite; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX "idx_project_info composite" ON public.project_info USING btree (locale, project_id); + + +-- +-- Name: idx_task_history_composite; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX idx_task_history_composite ON public.task_history USING btree (task_id, project_id); + + +-- +-- Name: idx_task_validation_history_composite; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX idx_task_validation_history_composite ON public.task_invalidation_history USING btree (task_id, project_id); + + +-- +-- Name: idx_task_validation_mapper_status_composite; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX idx_task_validation_mapper_status_composite ON public.task_invalidation_history USING btree (invalidator_id, is_closed); + + +-- +-- Name: idx_username_lower; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX idx_username_lower ON public.users USING btree (lower((username)::text)); + + +-- +-- Name: ix_messages_message_type; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_messages_message_type ON public.messages USING btree (message_type); + + +-- +-- Name: ix_messages_project_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_messages_project_id ON public.messages USING btree (project_id); + + +-- +-- Name: ix_messages_task_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_messages_task_id ON public.messages USING btree (task_id); + + +-- +-- Name: ix_messages_to_user_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_messages_to_user_id ON public.messages USING btree (to_user_id); + + +-- +-- Name: ix_project_chat_project_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_project_chat_project_id ON public.project_chat USING btree (project_id); + + +-- +-- Name: ix_projects_campaign_tag; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_projects_campaign_tag ON public.projects USING btree (campaign_tag); + + +-- +-- Name: ix_projects_mapper_level; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_projects_mapper_level ON public.projects USING btree (mapper_level); + + +-- +-- Name: ix_projects_mapping_types; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_projects_mapping_types ON public.projects USING btree (mapping_types); + + +-- +-- Name: ix_projects_organisation_tag; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_projects_organisation_tag ON public.projects USING btree (organisation_tag); + + +-- +-- Name: 
ix_task_history_project_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_task_history_project_id ON public.task_history USING btree (project_id); + + +-- +-- Name: ix_tasks_project_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_tasks_project_id ON public.tasks USING btree (project_id); + + +-- +-- Name: ix_users_id; Type: INDEX; Schema: public; Owner: - +-- + +CREATE INDEX ix_users_id ON public.users USING btree (id); + + +-- +-- Name: task_invalidation_history fk_invalidation_history; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT fk_invalidation_history FOREIGN KEY (invalidation_history_id) REFERENCES public.task_history(id); + + +-- +-- Name: task_invalidation_history fk_invalidators; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT fk_invalidators FOREIGN KEY (invalidator_id) REFERENCES public.users(id); + + +-- +-- Name: projects fk_licenses; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.projects + ADD CONSTRAINT fk_licenses FOREIGN KEY (license_id) REFERENCES public.licenses(id); + + +-- +-- Name: task_invalidation_history fk_mappers; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT fk_mappers FOREIGN KEY (mapper_id) REFERENCES public.users(id); + + +-- +-- Name: messages fk_message_projects; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.messages + ADD CONSTRAINT fk_message_projects FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: task_history fk_tasks; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_history + ADD CONSTRAINT fk_tasks FOREIGN KEY (task_id, project_id) REFERENCES public.tasks(id, project_id); + + +-- +-- Name: task_invalidation_history fk_tasks; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT fk_tasks FOREIGN KEY (task_id, project_id) REFERENCES public.tasks(id, project_id); + + +-- +-- Name: projects fk_users; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.projects + ADD CONSTRAINT fk_users FOREIGN KEY (author_id) REFERENCES public.users(id); + + +-- +-- Name: task_history fk_users; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_history + ADD CONSTRAINT fk_users FOREIGN KEY (user_id) REFERENCES public.users(id); + + +-- +-- Name: tasks fk_users_locked; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tasks + ADD CONSTRAINT fk_users_locked FOREIGN KEY (locked_by) REFERENCES public.users(id); + + +-- +-- Name: tasks fk_users_mapper; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tasks + ADD CONSTRAINT fk_users_mapper FOREIGN KEY (mapped_by) REFERENCES public.users(id); + + +-- +-- Name: tasks fk_users_validator; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tasks + ADD CONSTRAINT fk_users_validator FOREIGN KEY (validated_by) REFERENCES public.users(id); + + +-- +-- Name: task_invalidation_history fk_validators; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT fk_validators FOREIGN KEY (validator_id) REFERENCES public.users(id); + + +-- +-- Name: messages messages_from_user_id_fkey; Type: FK CONSTRAINT; Schema: 
public; Owner: - +-- + +ALTER TABLE ONLY public.messages + ADD CONSTRAINT messages_from_user_id_fkey FOREIGN KEY (from_user_id) REFERENCES public.users(id); + + +-- +-- Name: messages messages_to_user_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.messages + ADD CONSTRAINT messages_to_user_id_fkey FOREIGN KEY (to_user_id) REFERENCES public.users(id); + + +-- +-- Name: project_allowed_users project_allowed_users_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_allowed_users + ADD CONSTRAINT project_allowed_users_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: project_allowed_users project_allowed_users_user_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_allowed_users + ADD CONSTRAINT project_allowed_users_user_id_fkey FOREIGN KEY (user_id) REFERENCES public.users(id); + + +-- +-- Name: project_chat project_chat_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_chat + ADD CONSTRAINT project_chat_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: project_chat project_chat_user_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_chat + ADD CONSTRAINT project_chat_user_id_fkey FOREIGN KEY (user_id) REFERENCES public.users(id); + + +-- +-- Name: project_info project_info_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_info + ADD CONSTRAINT project_info_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: project_priority_areas project_priority_areas_priority_area_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_priority_areas + ADD CONSTRAINT project_priority_areas_priority_area_id_fkey FOREIGN KEY (priority_area_id) REFERENCES public.priority_areas(id); + + +-- +-- Name: project_priority_areas project_priority_areas_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.project_priority_areas + ADD CONSTRAINT project_priority_areas_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: task_history task_history_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_history + ADD CONSTRAINT task_history_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: task_invalidation_history task_invalidation_history_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.task_invalidation_history + ADD CONSTRAINT task_invalidation_history_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: tasks tasks_project_id_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.tasks + ADD CONSTRAINT tasks_project_id_fkey FOREIGN KEY (project_id) REFERENCES public.projects(id); + + +-- +-- Name: users_licenses users_licenses_license_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.users_licenses + ADD CONSTRAINT users_licenses_license_fkey FOREIGN KEY (license) REFERENCES public.licenses(id); + + +-- +-- Name: users_licenses users_licenses_user_fkey; Type: FK CONSTRAINT; Schema: public; Owner: - +-- + +ALTER TABLE ONLY public.users_licenses + ADD CONSTRAINT users_licenses_user_fkey 
FOREIGN KEY ("user") REFERENCES public.users(id); + + +-- +-- PostgreSQL database dump complete +-- From a5c13ce66314a5499a2e1fe997a8ecb0aa207edf Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:26:49 +0545 Subject: [PATCH 132/153] added docker config --- docs/CONFIG_DOC.md | 43 +++++++++++++++++++++++++++++++++++++++++-- 1 file changed, 41 insertions(+), 2 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 384a86c7..1b778059 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -7,6 +7,45 @@ Before getting started on config Make sure you have [Postgres](https://www.postg ### 1. Create ```config.txt``` inside src directory. ![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) +### For docker users : +If you want to use docker postgres Sample data for underpass, insights, taskingmanager, rawdata is included in db itself : +You can use following config and navigate to **Step 6** or Setup them by yourself by following instructions +``` +[INSIGHTS] +host=pgsql +user=postgres +password=admin +database=insights +port=5432 + +[UNDERPASS] +host=pgsql +user=postgres +password=admin +database=underpass +port=5432 + +[TM] +host=pgsql +user=postgres +password=admin +database=tm +port=5432 + +[RAW_DATA] +host=pgsql +user=postgres +password=admin +database=raw +port=5432 + +[API_CONFIG] +env=dev + +[CELERY] +CELERY_BROKER_URL=redis://redis:6379/0 +CELERY_RESULT_BACKEND=redis://redis:6379/0 +``` ### 2. Setup Underpass Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from @@ -74,10 +113,10 @@ port=5432 ### 5. Setup Tasking Manager Database for TM related development -Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager/blob/develop/docs/developers/development-setup.md#backend) OR Create database "tm" in your local postgres and insert sample dump from [TM test dump](https://github.com/hotosm/tasking-manager/blob/develop/tests/database/tasking-manager.sql). +Setup Tasking manager from [here](https://github.com/hotosm/tasking-manager) OR Create database "tm" in your local postgres and insert sample dump from TM Sample Dump ``` -wget https://raw.githubusercontent.com/hotosm/tasking-manager/develop/tests/database/tasking-manager.sql +/tests/src/fixtures/tasking-manager.sql ``` ``` From f2bab05673f04fad44a21eb956d394cbeb9c2faa Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:27:56 +0545 Subject: [PATCH 133/153] update port info --- docs/GETTING_STARTED_WITH_DOCKER.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index d923c659..fb3ec830 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -14,7 +14,7 @@ docker-compose up -d --build ### 3. 
Check Servers -Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 5550 +Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 5000 ``` http://127.0.0.1:8000/latest/docs @@ -23,7 +23,7 @@ API Docs will be displayed like this upon uvicorn successfull server start ![image](https://user-images.githubusercontent.com/36752999/191813795-fdfd46fe-5e6c-4ecf-be9b-f9f351d3d1d7.png) ``` -http://127.0.0.1:5550/ +http://127.0.0.1:5000/ ``` Flower dashboard will look like this on successfull installation with a worker online From 2254bae070c2e01dcb0baef57c9e15de501cfb62 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Fri, 30 Sep 2022 16:28:24 +0545 Subject: [PATCH 134/153] changed port for flower --- docker-compose.yml | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docker-compose.yml b/docker-compose.yml index cf59a672..bab82ab1 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -50,9 +50,9 @@ services: worker-dashboard: build: . container_name: flower - command: celery --app API.api_worker flower --port=5550 --broker=redis://redis:6379/ + command: celery --app API.api_worker flower --port=5000 --broker=redis://redis:6379/ ports: - - 5550:5550 + - 5000:5000 depends_on: - app - redis From 8d50ded80a3e9fb6f8a9be1af81ac6b4bd979f75 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:29:40 +0545 Subject: [PATCH 135/153] Update CONFIG_DOC.md --- docs/CONFIG_DOC.md | 39 --------------------------------------- 1 file changed, 39 deletions(-) diff --git a/docs/CONFIG_DOC.md b/docs/CONFIG_DOC.md index 1b778059..73382a72 100644 --- a/docs/CONFIG_DOC.md +++ b/docs/CONFIG_DOC.md @@ -7,45 +7,6 @@ Before getting started on config Make sure you have [Postgres](https://www.postg ### 1. Create ```config.txt``` inside src directory. ![image](https://user-images.githubusercontent.com/36752999/188402566-80dc9633-5d4e-479c-97dc-9e8a4999b385.png) -### For docker users : -If you want to use docker postgres Sample data for underpass, insights, taskingmanager, rawdata is included in db itself : -You can use following config and navigate to **Step 6** or Setup them by yourself by following instructions -``` -[INSIGHTS] -host=pgsql -user=postgres -password=admin -database=insights -port=5432 - -[UNDERPASS] -host=pgsql -user=postgres -password=admin -database=underpass -port=5432 - -[TM] -host=pgsql -user=postgres -password=admin -database=tm -port=5432 - -[RAW_DATA] -host=pgsql -user=postgres -password=admin -database=raw -port=5432 - -[API_CONFIG] -env=dev - -[CELERY] -CELERY_BROKER_URL=redis://redis:6379/0 -CELERY_RESULT_BACKEND=redis://redis:6379/0 -``` ### 2. 
Setup Underpass Run underpass from [here](https://github.com/hotosm/underpass/blob/master/doc/getting-started.md) OR Create database "underpass" in your local postgres and insert sample dump from From a024dadec0f87b427f9f84db9ac8e398013db19b Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 30 Sep 2022 16:35:13 +0545 Subject: [PATCH 136/153] Added config for docker setup --- docs/GETTING_STARTED_WITH_DOCKER.md | 50 ++++++++++++++++++++++++++++- 1 file changed, 49 insertions(+), 1 deletion(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index fb3ec830..345d5654 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -4,8 +4,56 @@ git clone https://github.com/hotosm/galaxy-api.git ``` -Follow [instructions](../docs/CONFIG_DOC.md) and create config.txt inside /src/ +- Create config.txt inside /src/ +``` +touch src/config.txt +``` + +- Put those config block inside your file + +If you want to use docker postgres Sample data for underpass, insights, taskingmanager, rawdata is included in db itself : +You can use following config to get started with sample data or Setup them by yourself by following [instructions](../docs/CONFIG_DOC.md) +``` +[INSIGHTS] +host=pgsql +user=postgres +password=admin +database=insights +port=5432 + +[UNDERPASS] +host=pgsql +user=postgres +password=admin +database=underpass +port=5432 + +[TM] +host=pgsql +user=postgres +password=admin +database=tm +port=5432 + +[RAW_DATA] +host=pgsql +user=postgres +password=admin +database=raw +port=5432 + +[API_CONFIG] +env=dev + +[CELERY] +CELERY_BROKER_URL=redis://redis:6379/0 +CELERY_RESULT_BACKEND=redis://redis:6379/0 +``` + +- **Setup Authentication** + + Follow this [Setup Oauth Block](../docs/CONFIG_DOC.md#6-setup-oauth-for-authentication) and include it in your config.txt ### 2. Create the images and spin up the Docker containers: ``` From f504b65837a737842dad54fc07e36dc0089a3780 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 18:18:46 +0545 Subject: [PATCH 137/153] changed port to 4000 --- README.md | 6 +++--- docker-compose.yml | 4 ++-- docs/GETTING_STARTED_WITH_DOCKER.md | 14 +++++++------- 3 files changed, 12 insertions(+), 12 deletions(-) diff --git a/README.md b/README.md index 1173c48d..9d9b4aea 100644 --- a/README.md +++ b/README.md @@ -76,7 +76,7 @@ celery --app API.api_worker worker --loglevel=INFO API uses flower for monitoring the Celery distributed queue. Run this command on different shell , if you are running redis on same machine your broker could be ```redis://localhost:6379/``` ``` -celery --app API.api_worker flower --port=5000 --broker=redis://redis:6379/ +celery --app API.api_worker flower --port=4000 --broker=redis://redis:6379/ ``` ### 6. Navigate to Fast API Docs to get details about API Endpoint @@ -87,10 +87,10 @@ After sucessfully running server , hit [this](http://127.0.0.1:8000/latest/docs) http://127.0.0.1:8000/latest/docs ``` -Flower dashboard should be available on 5000 localhost port. +Flower dashboard should be available on 4000 localhost port. ``` -http://127.0.0.1:5000/ +http://127.0.0.1:4000/ ``` ## Check API Installation diff --git a/docker-compose.yml b/docker-compose.yml index bab82ab1..c30b04ec 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -50,9 +50,9 @@ services: worker-dashboard: build: . 
container_name: flower - command: celery --app API.api_worker flower --port=5000 --broker=redis://redis:6379/ + command: celery --app API.api_worker flower --port=4000 --broker=redis://redis:6379/ ports: - - 5000:5000 + - 4000:4000 depends_on: - app - redis diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 345d5654..27488b17 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -10,9 +10,9 @@ git clone https://github.com/hotosm/galaxy-api.git touch src/config.txt ``` -- Put those config block inside your file +- Put those config block inside your file -If you want to use docker postgres Sample data for underpass, insights, taskingmanager, rawdata is included in db itself : +If you want to use docker postgres Sample data for underpass, insights, taskingmanager, rawdata is included in db itself : You can use following config to get started with sample data or Setup them by yourself by following [instructions](../docs/CONFIG_DOC.md) ``` [INSIGHTS] @@ -51,9 +51,9 @@ CELERY_BROKER_URL=redis://redis:6379/0 CELERY_RESULT_BACKEND=redis://redis:6379/0 ``` -- **Setup Authentication** - - Follow this [Setup Oauth Block](../docs/CONFIG_DOC.md#6-setup-oauth-for-authentication) and include it in your config.txt +- **Setup Authentication** + + Follow this [Setup Oauth Block](../docs/CONFIG_DOC.md#6-setup-oauth-for-authentication) and include it in your config.txt ### 2. Create the images and spin up the Docker containers: ``` @@ -62,7 +62,7 @@ docker-compose up -d --build ### 3. Check Servers -Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 5000 +Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 4000 ``` http://127.0.0.1:8000/latest/docs @@ -71,7 +71,7 @@ API Docs will be displayed like this upon uvicorn successfull server start ![image](https://user-images.githubusercontent.com/36752999/191813795-fdfd46fe-5e6c-4ecf-be9b-f9f351d3d1d7.png) ``` -http://127.0.0.1:5000/ +http://127.0.0.1:4000/ ``` Flower dashboard will look like this on successfull installation with a worker online From 0fbffeea533f577ba86ed4e1b758859f6a54e187 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 18:58:41 +0545 Subject: [PATCH 138/153] fixed chmod command --- Dockerfile | 9 ++------- docker-compose.yml | 29 +++++++++++++++-------------- docker-multiple-db.sh | 0 populate-docker-db.sh | 0 4 files changed, 17 insertions(+), 21 deletions(-) mode change 100644 => 100755 docker-multiple-db.sh mode change 100644 => 100755 populate-docker-db.sh diff --git a/Dockerfile b/Dockerfile index cd6b7bc0..5870b5e9 100644 --- a/Dockerfile +++ b/Dockerfile @@ -8,14 +8,9 @@ RUN apt-get update && apt-get -y upgrade && \ RUN mkdir /app COPY requirements.docker.txt /app/requirements.docker.txt -COPY populate-docker-db.sh /docker-entrypoint-initdb.d/ -COPY docker-multiple-db.sh /docker-entrypoint-initdb.d/ -COPY /tests/src/fixtures/insights.sql /insights.sql -COPY /tests/src/fixtures/mapathon_summary.sql /mapathon_summary.sql -COPY /tests/src/fixtures/raw_data.sql /raw_data.sql -COPY /tests/src/fixtures/underpass.sql /underpass.sql -COPY /tests/src/fixtures/tasking-manager.sql /tasking-manager.sql +RUN chmod +x docker-multiple-db.sh +RUN chmod +x populate-docker-db.sh COPY setup.py /app/setup.py diff --git a/docker-compose.yml b/docker-compose.yml index c30b04ec..71768db6 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -2,18 +2,6 @@ 
version: '3.8' services: - app: - build: . - container_name: api - command: uvicorn API.main:app --reload --host 0.0.0.0 --port 8000 --no-use-colors --proxy-headers - ports: - - 8000:8000 - volumes: - - .:/app - depends_on: - - redis - - postgres - postgres: image: postgis/postgis container_name: pgsql @@ -26,8 +14,21 @@ services: volumes: - ./tests/src/fixtures/:/sql/ - ./postgres-data:/var/lib/postgresql/data - - ./docker-multiple-db.sh:/docker-entrypoint-initdb.d/docker-multiple-db.sh - - ./populate-docker-db.sh:/docker-entrypoint-initdb.d/populate-docker-db.sh + - ./docker-multiple-db.sh:/docker-entrypoint-initdb.d/create.sh + - ./populate-docker-db.sh:/docker-entrypoint-initdb.d/insert.sh + + + app: + build: . + container_name: api + command: uvicorn API.main:app --reload --host 0.0.0.0 --port 8000 --no-use-colors --proxy-headers + ports: + - 8000:8000 + volumes: + - .:/app + depends_on: + - redis + - postgres worker: diff --git a/docker-multiple-db.sh b/docker-multiple-db.sh old mode 100644 new mode 100755 diff --git a/populate-docker-db.sh b/populate-docker-db.sh old mode 100644 new mode 100755 From 3671987b6cbdc968a3ffe5b19e5f04ac340fe27a Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 19:00:58 +0545 Subject: [PATCH 139/153] added ref sh --- Dockerfile | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/Dockerfile b/Dockerfile index 5870b5e9..f85cbb27 100644 --- a/Dockerfile +++ b/Dockerfile @@ -9,8 +9,8 @@ RUN apt-get update && apt-get -y upgrade && \ RUN mkdir /app COPY requirements.docker.txt /app/requirements.docker.txt -RUN chmod +x docker-multiple-db.sh -RUN chmod +x populate-docker-db.sh +RUN chmod +x ./docker-multiple-db.sh +RUN chmod +x ./populate-docker-db.sh COPY setup.py /app/setup.py From 8f5190d3c7653406362c410e65b22ef682dc2481 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 19:03:30 +0545 Subject: [PATCH 140/153] added link to readme --- docs/GETTING_STARTED_WITH_DOCKER.md | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 27488b17..87b7de69 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -62,7 +62,7 @@ docker-compose up -d --build ### 3. 
Check Servers -Uvicorn should be running on 8000 port , Redis on default port , Celery with a worker and Flower on 4000 +Uvicorn should be running on [8000](http://127.0.0.1:8000/latest/docs) port , Redis on default port , Celery with a worker and Flower on 4000 ``` http://127.0.0.1:8000/latest/docs @@ -74,7 +74,7 @@ API Docs will be displayed like this upon uvicorn successfull server start http://127.0.0.1:4000/ ``` -Flower dashboard will look like this on successfull installation with a worker online +Flower [dashboard](http://127.0.0.1:4000/) will look like this on successfull installation with a worker online ![image](https://user-images.githubusercontent.com/36752999/191813613-3859522b-ea68-4370-87b2-ebd1d8880d80.png) From 71b339be88d35731706a21b22c0d227a2cf619b7 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Fri, 30 Sep 2022 19:11:56 +0545 Subject: [PATCH 141/153] added troubleshoot for executable .sh --- docs/GETTING_STARTED_WITH_DOCKER.md | 11 +++++++++++ 1 file changed, 11 insertions(+) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 87b7de69..59b88464 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -96,3 +96,14 @@ Since API is running through container, If you have local postgres installed on Find your network ip address (for linux/mac you can use ```ifconfig -l | xargs -n1 ipconfig getifaddr``` ) and use your ip as a host instead of localhost in config file . If connection still fails : You may need to edit your postgres config file ( ask postgres where it is by this query ```show config_file;``` ) and edit/enable ```listen_addresses = '*'``` inside ```postgresql.conf``` . Also add ```host all all 0.0.0.0/0 trust``` in ```pg_hba.conf``` + +### [Troubleshoot] If you can't run postgresql on docker to execute .sh script provided + +Make your .sh script executable . For eg : In ubuntu/mac + +``` +chmod +x populate-docker-db.sh +``` +``` +chmod +x docker-multiple-db.sh +``` \ No newline at end of file From f94d5a63df1c3951731dbe2ed9bbc5ab19f91551 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Fri, 30 Sep 2022 19:13:54 +0545 Subject: [PATCH 142/153] Update GETTING_STARTED_WITH_DOCKER.md --- docs/GETTING_STARTED_WITH_DOCKER.md | 5 +---- 1 file changed, 1 insertion(+), 4 deletions(-) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index 59b88464..d56d140e 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -102,8 +102,5 @@ Since API is running through container, If you have local postgres installed on Make your .sh script executable . For eg : In ubuntu/mac ``` -chmod +x populate-docker-db.sh +chmod +x populate-docker-db.sh && chmod +x docker-multiple-db.sh ``` -``` -chmod +x docker-multiple-db.sh -``` \ No newline at end of file From 23d9982995dd64251d2f8e62b6a9eab10c7e97f2 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Fri, 30 Sep 2022 19:36:15 +0545 Subject: [PATCH 143/153] formatted readme --- README.md | 38 +++++++++++++++++++------------------- 1 file changed, 19 insertions(+), 19 deletions(-) diff --git a/README.md b/README.md index 9d9b4aea..c1278d07 100644 --- a/README.md +++ b/README.md @@ -94,36 +94,36 @@ http://127.0.0.1:4000/ ``` ## Check API Installation -### Check Authetication +- Check Mapathon Summary : -1. Hit /auth/login/ -2. Hit Url returned on response -3. You will get access_token -4. 
You can use that access_token in all endpoints that requires authentication , To check token pass token in /auth/me/ It should return your osm profile + ``` + curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ + ``` + It should return some stats -If you get a 401 response with the detail "User is not staff member", get your OSM id using https://galaxy-api.hotosm.org/v1/docs#/default/get_user_id_osm_users_ids__post, then run the following SQL on underpass database replacing ID: +- Check Authetication : -```sql -INSERT INTO users_roles VALUES (ID, 1); -``` + 1. Hit /auth/login/ + 2. Hit Url returned on response + 3. You will get access_token + 4. You can use that access_token in all endpoints that requires authentication , To check token pass token in /auth/me/ It should return your osm profile -Repeat the steps to get a new access_token. + If you get a 401 response with the detail "User is not staff member", get your OSM id using https://galaxy-api.hotosm.org/v1/docs#/default/get_user_id_osm_users_ids__post, then run the following SQL on underpass database replacing ID: -Check endpoints : + ```sql + INSERT INTO users_roles VALUES (ID, 1); + ``` -- Check Mapathon Summary : + Repeat the steps to get a new access_token. -``` -curl -d '{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]}' -H 'Content-Type: application/json' http://127.0.0.1:8000/v1/mapathon/summary/ -``` - It should return some stats - Check Mapathon detailed report : + You can test with the `/mapathon/detail/` endpoint with the following input to check both authentication , database connection and visualize the above summary result -``` -{"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]} -``` + ``` + {"project_ids": [11224, 10042, 9906, 1381, 11203, 10681, 8055, 8732, 11193, 7305,11210, 10985, 10988, 11190, 6658, 5644, 10913, 6495, 4229],"fromTimestamp":"2021-08-27T9:00:00","toTimestamp":"2021-08-27T11:00:00","hashtags": ["mapandchathour2021"]} + ``` Clean Setup of API can be found in github action workflow , You can follow the steps for more [clarity](/.github/workflows/build.yml). 
```/workflows/build.yml``` From 722dc69868fede7a1e37de51dd5e2a308a6614d8 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Mon, 3 Oct 2022 14:30:10 +0545 Subject: [PATCH 144/153] added default output type as geojson from worker --- API/api_worker.py | 3 ++- src/galaxy/app.py | 7 +------ 2 files changed, 3 insertions(+), 7 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index 9d6acd54..f170df25 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -33,6 +33,7 @@ def process_raw_data(self, params): start_time = dt.now() bind_zip=params.bind_zip if allow_bind_zip_filter else True # unique id for zip file and geojson for each export + params.output_type = params.output_type if params.output_type else RawDataOutputType.GEOJSON.value params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Export' exportname = f"{params.file_name}_{str(self.request.id)}_{params.output_type}" @@ -79,7 +80,7 @@ def process_raw_data(self, params): response_time = dt.now() - start_time response_time_str = str(response_time) logging.info(f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") - return {"download_url": download_url, "file_name": exportname, "process_time": response_time_str, "query_area": f"{geom_area} Sq Km ", "binded_file_size": f"{round(inside_file_size/1000000,2)} MB", "zip_file_size_bytes": zip_file_size} + return {"download_url": download_url, "file_name": params.file_name, "process_time": response_time_str, "query_area": f"{geom_area} Sq Km ", "binded_file_size": f"{round(inside_file_size/1000000,2)} MB", "zip_file_size_bytes": zip_file_size} except Exception as ex: raise ex diff --git a/src/galaxy/app.py b/src/galaxy/app.py index d1234fd6..7e185777 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -1057,12 +1057,7 @@ def extract_current_data(self, exportname): # first check either geometry needs grid or not for querying grid_id, geometry_dump, geom_area = RawData.get_grid_id( self.params.geometry, self.cur) - if self.params.output_type is None: - # if nothing is supplied then default output type will be geojson - output_type = RawDataOutputType.GEOJSON.value - else: - output_type = self.params.output_type - + output_type = self.params.output_type # Check whether the export path exists or not working_dir=os.path.join(export_path, exportname) if not os.path.exists(working_dir): From 411291d666a12aeaf2f5a5b520e80a4982c6b918 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Mon, 3 Oct 2022 14:35:27 +0545 Subject: [PATCH 145/153] reformatted response --- API/api_worker.py | 2 +- src/galaxy/app.py | 4 ++-- 2 files changed, 3 insertions(+), 3 deletions(-) diff --git a/API/api_worker.py b/API/api_worker.py index f170df25..f8c0a786 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -80,7 +80,7 @@ def process_raw_data(self, params): response_time = dt.now() - start_time response_time_str = str(response_time) logging.info(f"Done Export : {exportname} of {round(inside_file_size/1000000)} MB / {geom_area} sqkm in {response_time_str}") - return {"download_url": download_url, "file_name": params.file_name, "process_time": response_time_str, "query_area": f"{geom_area} Sq Km ", "binded_file_size": f"{round(inside_file_size/1000000,2)} MB", "zip_file_size_bytes": zip_file_size} + return {"download_url": download_url, "file_name": params.file_name, "process_time": response_time_str, "query_area": f"{round(geom_area,2)} Sq Km", "binded_file_size": f"{round(inside_file_size/1000000,2)} MB", 
"zip_file_size_bytes": zip_file_size} except Exception as ex: raise ex diff --git a/src/galaxy/app.py b/src/galaxy/app.py index 7e185777..517bf487 100644 --- a/src/galaxy/app.py +++ b/src/galaxy/app.py @@ -1033,9 +1033,9 @@ def get_grid_id(geom, cur): """ geometry_dump = dumps(dict(geom)) # generating geometry area in sqkm - geom_area = int(area(json.loads(geom.json())) * 1E-6) + geom_area = area(json.loads(geom.json())) * 1E-6 # only apply grid in the logic if it exceeds the 5000 Sqkm - if geom_area > grid_index_threshold: + if int(geom_area) > grid_index_threshold: # this will be applied only when polygon gets bigger we will be slicing index size to search cur.execute( get_grid_id_query(geometry_dump)) From b95413397ca937ae9496fe5f5e02852bbca6a4cb Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Mon, 3 Oct 2022 14:37:58 +0545 Subject: [PATCH 146/153] reformatted response to galaxy_export --- API/api_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API/api_worker.py b/API/api_worker.py index f8c0a786..a0c3facd 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -34,7 +34,7 @@ def process_raw_data(self, params): bind_zip=params.bind_zip if allow_bind_zip_filter else True # unique id for zip file and geojson for each export params.output_type = params.output_type if params.output_type else RawDataOutputType.GEOJSON.value - params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Export' + params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Galaxy_Export' exportname = f"{params.file_name}_{str(self.request.id)}_{params.output_type}" logging.info("Request %s received", exportname) From 26125ec36600792b8bd03383dfd55c8c31aca597 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Mon, 3 Oct 2022 14:38:27 +0545 Subject: [PATCH 147/153] typo --- API/api_worker.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/API/api_worker.py b/API/api_worker.py index a0c3facd..a6809e18 100644 --- a/API/api_worker.py +++ b/API/api_worker.py @@ -34,7 +34,7 @@ def process_raw_data(self, params): bind_zip=params.bind_zip if allow_bind_zip_filter else True # unique id for zip file and geojson for each export params.output_type = params.output_type if params.output_type else RawDataOutputType.GEOJSON.value - params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Galaxy_Export' + params.file_name=format_file_name_str(params.file_name) if params.file_name else 'Galaxy_export' exportname = f"{params.file_name}_{str(self.request.id)}_{params.output_type}" logging.info("Request %s received", exportname) From 471894d9f8201822db2e84d86379c501963637ad Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Tue, 4 Oct 2022 09:36:03 +0545 Subject: [PATCH 148/153] Update GETTING_STARTED_WITH_DOCKER.md --- docs/GETTING_STARTED_WITH_DOCKER.md | 1 + 1 file changed, 1 insertion(+) diff --git a/docs/GETTING_STARTED_WITH_DOCKER.md b/docs/GETTING_STARTED_WITH_DOCKER.md index d56d140e..24d8e68f 100644 --- a/docs/GETTING_STARTED_WITH_DOCKER.md +++ b/docs/GETTING_STARTED_WITH_DOCKER.md @@ -104,3 +104,4 @@ Make your .sh script executable . For eg : In ubuntu/mac ``` chmod +x populate-docker-db.sh && chmod +x docker-multiple-db.sh ``` +In windows you can recreate the file and paste the content ! 
From 8bad59166ee8d95836c5653f417a375066a512cd Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma <36752999+kshitijrajsharma@users.noreply.github.com> Date: Tue, 4 Oct 2022 10:17:57 +0545 Subject: [PATCH 149/153] Update README.md --- README.md | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/README.md b/README.md index c1278d07..c4a23973 100644 --- a/README.md +++ b/README.md @@ -54,7 +54,7 @@ pip install -r requirements.txt ### 2. Setup required config for API -Make sure you have https://www.postgresql.org/ setup in your machine. +Make sure you have https://www.postgresql.org/ setup in your machine or you can use docker Setup necessary config for API from [docs/CONFIG.DOC](/docs/CONFIG_DOC.md) From 6597706a15479b6fccdf6374f59e4eb2772ef4ac Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Tue, 4 Oct 2022 19:22:12 +0545 Subject: [PATCH 150/153] fixed bug on select condition --- src/galaxy/query_builder/builder.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/src/galaxy/query_builder/builder.py b/src/galaxy/query_builder/builder.py index d374ec6f..d66a167e 100644 --- a/src/galaxy/query_builder/builder.py +++ b/src/galaxy/query_builder/builder.py @@ -1066,7 +1066,6 @@ def extract_attributes_tags(filters): poly_attribute_filter = v if k == SupportedGeometryFilters.ALLGEOM.value: master_attribute_filter = v - return tags, attributes, point_attribute_filter, line_attribute_filter, poly_attribute_filter, master_attribute_filter, point_tag_filter, line_tag_filter, poly_tag_filter, master_tag_filter @@ -1095,7 +1094,6 @@ def raw_currentdata_extraction_query(params, g_id, geometry_dump, ogr_export=Fal point_select_condition = select_condition # initializing default line_select_condition = select_condition poly_select_condition = select_condition - if params.filters: tags, attributes, point_attribute_filter, line_attribute_filter, poly_attribute_filter, master_attribute_filter, point_tag_filter, line_tag_filter, poly_tag_filter, master_tag_filter = extract_attributes_tags( params.filters) @@ -1118,7 +1116,7 @@ def raw_currentdata_extraction_query(params, g_id, geometry_dump, ogr_export=Fal line_select_condition = create_column_filter( line_attribute_filter) if poly_attribute_filter: - if len(line_attribute_filter) > 0: + if len(poly_attribute_filter) > 0: poly_select_condition = create_column_filter( point_attribute_filter) if tags: From d2527a217e5c14b5414cd41126313fc94e8a483e Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Tue, 4 Oct 2022 20:19:10 +0545 Subject: [PATCH 151/153] fixed select query for relation --- src/galaxy/query_builder/builder.py | 3 ++- 1 file changed, 2 insertions(+), 1 deletion(-) diff --git a/src/galaxy/query_builder/builder.py b/src/galaxy/query_builder/builder.py index d66a167e..3996dea1 100644 --- a/src/galaxy/query_builder/builder.py +++ b/src/galaxy/query_builder/builder.py @@ -1119,6 +1119,7 @@ def raw_currentdata_extraction_query(params, g_id, geometry_dump, ogr_export=Fal if len(poly_attribute_filter) > 0: poly_select_condition = create_column_filter( point_attribute_filter) + print(poly_select_condition) if tags: if master_tag_filter: # if master tag is supplied then other tags should be ignored and master tag will be used master_tag = generate_tag_filter_query(master_tag_filter, params) @@ -1194,7 +1195,7 @@ def raw_currentdata_extraction_query(params, g_id, geometry_dump, ogr_export=Fal query_ways_poly += f""" and ({poly_tag})""" base_query.append(query_ways_poly) query_relations_poly = f"""select - {select_condition} + 
{poly_select_condition} from relations where From 1624014393d50aeac0073b0be98853eee5d5bf44 Mon Sep 17 00:00:00 2001 From: itskshitiz321 Date: Wed, 5 Oct 2022 13:34:57 +0545 Subject: [PATCH 152/153] changed rawdata response type --- API/raw_data.py | 3 +-- 1 file changed, 1 insertion(+), 2 deletions(-) diff --git a/API/raw_data.py b/API/raw_data.py index eb651926..2eac4659 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -267,8 +267,7 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks def check_current_db_status(): """Gives status about DB update, Substracts with current time and last db update time""" result = RawData().check_status() - response = f"{result} ago" - return {"last_updated": response} + return {"last_updated": result} def remove_file(path: str) -> None: From f3a4d863b071d3e874031c453da64f17a6afd5b2 Mon Sep 17 00:00:00 2001 From: Kshitij Raj Sharma Date: Mon, 10 Oct 2022 19:47:22 +0545 Subject: [PATCH 153/153] changed to timestamp rather than difference --- API/raw_data.py | 2 +- src/galaxy/query_builder/builder.py | 2 +- 2 files changed, 2 insertions(+), 2 deletions(-) diff --git a/API/raw_data.py b/API/raw_data.py index 2eac4659..9ad0a746 100644 --- a/API/raw_data.py +++ b/API/raw_data.py @@ -265,7 +265,7 @@ def get_current_snapshot_osm_data(params: RawDataCurrentParams, background_tasks @router.get("/status/") @version(1) def check_current_db_status(): - """Gives status about DB update, Substracts with current time and last db update time""" + """Gives status about how recent the osm data is , it will give the last time that database was updated completely""" result = RawData().check_status() return {"last_updated": result} diff --git a/src/galaxy/query_builder/builder.py b/src/galaxy/query_builder/builder.py index 3996dea1..e00e4705 100644 --- a/src/galaxy/query_builder/builder.py +++ b/src/galaxy/query_builder/builder.py @@ -1219,7 +1219,7 @@ def raw_currentdata_extraction_query(params, g_id, geometry_dump, ogr_export=Fal def check_last_updated_rawdata(): - query = """select NOW()-importdate as last_updated from planet_osm_replication_status""" + query = """select importdate as last_updated from planet_osm_replication_status""" return query
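
The last two patches above change `/v1/raw-data/status/` so that `last_updated` is the raw `importdate` from `planet_osm_replication_status` instead of a precomputed `NOW() - importdate` difference, which leaves the freshness calculation to the caller. A minimal client-side sketch of that calculation follows; it assumes a local deployment on port 8000 and an ISO-8601 style timestamp serialization, neither of which is guaranteed by the patches themselves.

```python
# Minimal sketch (illustration only): compute data age from the new
# /v1/raw-data/status/ response, which now returns the import timestamp
# rather than a human-readable difference.
from datetime import datetime, timezone

import requests

status = requests.get("http://127.0.0.1:8000/v1/raw-data/status/").json()
last_updated = status["last_updated"]
print("Raw data last updated at:", last_updated)

try:
    parsed = datetime.fromisoformat(str(last_updated))
    if parsed.tzinfo is None:
        # Assumption: the import date is stored in UTC; adjust if it is not.
        parsed = parsed.replace(tzinfo=timezone.utc)
    print("Data age:", datetime.now(timezone.utc) - parsed)
except ValueError:
    # The serialization format is an assumption; fall back to the raw value.
    print("Could not parse timestamp:", last_updated)
```

Returning the timestamp itself is arguably the more flexible contract: each consumer can decide how to present freshness, whereas the previous `"{result} ago"` string baked one presentation into the API.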