Skip to content

Commit

Permalink
Wrap external calls with try-except (#67)
Browse files Browse the repository at this point in the history
* wrap external calls with try-except

* remove print_error function
  • Loading branch information
andela-mmakinde authored and DavidLemayian committed Nov 15, 2017
1 parent a6aeeb9 commit 09df6c0
Show file tree
Hide file tree
Showing 5 changed files with 53 additions and 33 deletions.
4 changes: 3 additions & 1 deletion healthtools/__init__.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,6 @@
import logging

# loggers.
logging.basicConfig(level=logging.DEBUG)
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(name)-12s %(levelname)-8s %(message)s',
datefmt="%Y-%m-%d %H:%M:%S")
20 changes: 13 additions & 7 deletions healthtools/search/elastic.py
Original file line number Diff line number Diff line change
@@ -1,14 +1,20 @@
import logging
from healthtools.core import es, es_index

log = logging.getLogger(__name__)

def search(query, doc_type):
    """Run a full-text query against the Elasticsearch index.

    Args:
        query: free-text search string.
        doc_type: the document type to restrict the search to.

    Returns:
        The 'hits' dict from the Elasticsearch response on success, or an
        empty dict ({}) if the backend call fails. Both failure and the
        "no hits key" case are falsy, so callers can keep using
        `if not result` checks.
    """
    try:
        result = es.search(
            index=es_index,
            body={'query': match_all_text(query)},
            doc_type=doc_type
        )
        return result.get('hits', {})
    except Exception as err:
        log.error("Error fetching data from elastic search \n" + str(err))
        # Explicit falsy return instead of falling off the end (implicit
        # None); matches the success path's `{}` fallback shape.
        return {}


def match_all():
Expand Down
38 changes: 21 additions & 17 deletions healthtools/search/nurses.py
Original file line number Diff line number Diff line change
@@ -1,10 +1,12 @@
import requests
import logging
from bs4 import BeautifulSoup


NURSING_COUNCIL_URL = 'http://nckenya.com/services/search.php?p=1&s={}'
NURSES_FIELDS = ['name', 'licence_no', 'valid_till']

log = logging.getLogger(__name__)

def search(query):
results = get_nurses_from_nc_registry(query)
def get_nurses_from_nc_registry(query):
    '''
    Get nurses from the nursing council of Kenya registry.

    Scrapes the registry search page for the given query and returns a
    dict of the shape {'hits': [...], 'total': n}. On any network or
    parsing failure the same (empty) structure is returned so callers
    always receive a usable result.
    '''
    url = NURSING_COUNCIL_URL.format(query)
    nurses = {'hits': [], 'total': 0}
    try:
        response = requests.get(url)
        if 'No results' in response.content:
            return nurses

        # make soup for parsing out of response and get the table
        soup = BeautifulSoup(response.content, 'html.parser')
        table = soup.find('table', {'class': 'zebra'}).find('tbody')
        rows = table.find_all("tr")

        # parse table for the nurses data
        for row in rows:
            # only the columns we want
            columns = row.find_all('td')[:len(NURSES_FIELDS)]
            columns = [text.text.strip() for text in columns]

            entry = dict(zip(NURSES_FIELDS, columns))
            nurses['hits'].append(entry)

        nurses['total'] = len(nurses['hits'])

        return nurses
    except Exception as err:
        log.error("Error getting nurses from the nursing council url \n" + str(err))
        # Previously fell off the end and returned None on failure; return
        # the empty result structure so the return shape is consistent.
        return nurses
5 changes: 3 additions & 2 deletions healthtools/search/query.py
Original file line number Diff line number Diff line change
@@ -1,11 +1,12 @@
import logging
from wit import Wit
from nested_lookup import nested_lookup
from healthtools.settings import WIT_ACCESS_TOKEN

from healthtools.documents import DOCUMENTS, doc_exists

from healthtools.search import elastic, nurses

log = logging.getLogger(__name__)

def run_query(query, doc_type=None):

Expand All @@ -18,7 +19,6 @@ def run_query(query, doc_type=None):
else:
search_type = 'nurses'
return doc_type, search_type

doc_type, search_type = determine_doc_type(query, doc_type)

if not doc_type:
Expand Down Expand Up @@ -55,6 +55,7 @@ def determine_doc_type(query, doc_type=None):
for keyword in DOCUMENTS[doc]['keywords']:
if query.startswith(keyword + ' '):
return doc, DOCUMENTS[doc]['search_type']
log.error("doc_type could not be determined from query\n Query: " + query)
return False, False


Expand Down
19 changes: 13 additions & 6 deletions healthtools/views/search_api.py
Original file line number Diff line number Diff line change
@@ -1,25 +1,32 @@
import logging
from flask import Blueprint, request, jsonify

from healthtools.search import run_query

blueprint = Blueprint('search_api', __name__)
log = logging.getLogger(__name__)

@blueprint.route('/search', methods=['GET'], strict_slashes=False)
@blueprint.route('/search/<doc_type>', methods=['GET'], strict_slashes=False)
def index(doc_type=None):
    """Search endpoint: /search?q=... or /search/<doc_type>?q=...

    Always responds with JSON of the shape
    {'result': ..., 'doc_type': ..., 'status': 'OK' | 'FAILED'}.
    A falsy result from run_query (its failure signal) and an exception
    raised by it are both reported as status FAILED.
    """
    query = request.args.get('q')

    try:
        result, doc_type = run_query(query, doc_type)
    except Exception as err:
        log.error('Search failed \n' + str(err))
        result = None

    # run_query signals failure with a falsy result; report FAILED instead
    # of returning an OK status with no data.
    if not result:
        response = jsonify({
            'result': {'hits': [], 'total': 0},
            'doc_type': doc_type,
            'status': 'FAILED',
            'msg': ''  # TODO: Pass run_query message here.
        })
    else:
        response = jsonify({
            'result': result,
            'doc_type': doc_type,
            'status': 'OK'
        })

    # TODO: Log event here (send to Google Analytics)

    return response

0 comments on commit 09df6c0

Please sign in to comment.